/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

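/* Template for a 5-byte "jmp rel32" instruction (opcode 0xe9); the
   32-bit displacement is patched in each time a copy is emitted.  */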
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  */
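/* (The leading '@' appears to tell gdbserver's target-description
   handling that the string is a literal XML document rather than the
   name of an annex file.)  */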

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
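/* (They are the arch_prctl(2) sub-commands; gdbserver issues them via
   the PTRACE_ARCH_PRCTL request to read or write a 64-bit inferior's
   FS/GS base addresses.)  */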
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

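    /* (DESC holds a struct user_desc { entry_number, base_addr, limit,
       flags... }; the segment's base address is its second word.)  */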
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

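    /* %gs holds a segment selector; its bottom three bits are the RPL
       and table-indicator flags, so shifting them off yields the GDT
       entry number that PTRACE_GET_THREAD_AREA expects.  */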
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update target_regsets accordingly, perhaps by
   moving target_regsets to linux_target_ops and setting the right one
   there, rather than having to modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
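/* 0xCC is the one-byte int3 instruction, the standard x86 software
   breakpoint.  */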
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */
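/* (On x86, DR0-DR3 hold watchpoint/hardware-breakpoint addresses,
   DR6 is the sticky status register, and DR7 is the control
   register.)  */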

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum < DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
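
  /* TYPE is the Z-packet code from GDB's remote protocol: '0' is a
     software breakpoint, '1' a hardware breakpoint, '2' a write
     watchpoint, '3' a read watchpoint, and '4' an access watchpoint.  */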
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

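  /* DR6's hit bits are sticky; clear them after a reported watchpoint
     stop so a stale status isn't mistaken for a new hit on the next
     stop.  */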
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];
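    /* (The kernel sizes this union so the whole siginfo occupies 128
       bytes; the three ints above account for the remainder.)  */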

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun;
   it has si_timer1 and si_timer2 in their place.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, copy from
   INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
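/* Non-zero if GDB announced (via "xmlRegisters=i386" in its qSupported
   query) that it understands x86 XML target descriptions.  */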
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];
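
      /* (Byte 464 falls in the software-reserved area of the FXSAVE
         region, where the kernel is understood to stash the XCR0
         value for NT_X86_XSTATE dumps -- an assumption about the
         kernel's layout, not something spelled out here.)  */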

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume GDB doesn't support XML unless it tells us otherwise: if
     GDB sends "xmlRegisters=" with "i386" in its qSupported query, it
     supports x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps into the jump pad to
   JJUMPAD_INSN; the caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
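  /* (A collecting_t, as used by the in-process agent's gdb_collect,
     appears to hold the tracepoint pointer plus the collecting
     thread's thread-area cookie; its stack address doubles as the
     lock owner ID below.)  */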
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* Spin-lock: atomically install the collecting_t's address in
     *LOCKADDR with cmpxchg, retrying until the lock reads as free.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function lives in the shared library, so it may be
     farther away from the jump pad than a 32-bit call displacement
     can reach; call it through a register.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps into the jump pad to
   JJUMPAD_INSN; the caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* Spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386; if we cared about such old CPUs, this could use
     xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
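
/* For example, EMIT_ASM (my_marker, "nop") assembles the insn between
   the start_my_marker/end_my_marker labels into this function's text
   and copies those bytes into the inferior at current_insn_ptr
   (my_marker is just an illustrative name).  */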

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
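
/* (In the goto emitters, *OFFSET_P and *SIZE_P tell the caller where
   the branch displacement sits within the emitted bytes and how wide
   it is, so the target can be patched in later via the
   write_goto_address hook.)  */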

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function lives in the shared library, so it may
     be farther away from the compiled code pad than a 32-bit call
     displacement can reach.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use r10, since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };

#endif /* __x86_64__ */

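/* (The 32-bit emitters below appear to keep the top of the agent
   expression stack in the %ebx:%eax pair, %ebx holding the high 32
   bits, with the remaining entries on the machine stack -- see how
   the epilogue stores both halves and how the two-word operations
   use adc/sbb.)  */
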
2033 static void
2034 i386_emit_prologue (void)
2035 {
2036 EMIT_ASM32 (i386_prologue,
2037 "push %ebp\n\t"
2038 "mov %esp,%ebp");
2039 /* At this point, the raw regs base address is at 8(%ebp), and the
2040 value pointer is at 12(%ebp). */
2041 }
2042
2043 static void
2044 i386_emit_epilogue (void)
2045 {
2046 EMIT_ASM32 (i386_epilogue,
2047 "mov 12(%ebp),%ecx\n\t"
2048 "mov %eax,(%ecx)\n\t"
2049 "mov %ebx,0x4(%ecx)\n\t"
2050 "xor %eax,%eax\n\t"
2051 "pop %ebp\n\t"
2052 "ret");
2053 }
2054
2055 static void
2056 i386_emit_add (void)
2057 {
2058 EMIT_ASM32 (i386_add,
2059 "add (%esp),%eax\n\t"
2060 "adc 0x4(%esp),%ebx\n\t"
2061 "lea 0x8(%esp),%esp");
2062 }
2063
2064 static void
2065 i386_emit_sub (void)
2066 {
2067 EMIT_ASM32 (i386_sub,
2068 "subl %eax,(%esp)\n\t"
2069 "sbbl %ebx,4(%esp)\n\t"
2070 "pop %eax\n\t"
2071 "pop %ebx\n\t");
2072 }
2073
2074 static void
2075 i386_emit_mul (void)
2076 {
2077 emit_error = 1;
2078 }
2079
2080 static void
2081 i386_emit_lsh (void)
2082 {
2083 emit_error = 1;
2084 }
2085
2086 static void
2087 i386_emit_rsh_signed (void)
2088 {
2089 emit_error = 1;
2090 }
2091
2092 static void
2093 i386_emit_rsh_unsigned (void)
2094 {
2095 emit_error = 1;
2096 }
2097
2098 static void
2099 i386_emit_ext (int arg)
2100 {
2101 switch (arg)
2102 {
2103 case 8:
2104 EMIT_ASM32 (i386_ext_8,
2105 "cbtw\n\t"
2106 "cwtl\n\t"
2107 "movl %eax,%ebx\n\t"
2108 "sarl $31,%ebx");
2109 break;
2110 case 16:
2111 EMIT_ASM32 (i386_ext_16,
2112 "cwtl\n\t"
2113 "movl %eax,%ebx\n\t"
2114 "sarl $31,%ebx");
2115 break;
2116 case 32:
2117 EMIT_ASM32 (i386_ext_32,
2118 "movl %eax,%ebx\n\t"
2119 "sarl $31,%ebx");
2120 break;
2121 default:
2122 emit_error = 1;
2123 }
2124 }
2125
2126 static void
2127 i386_emit_log_not (void)
2128 {
2129 EMIT_ASM32 (i386_log_not,
2130 "or %ebx,%eax\n\t"
2131 "test %eax,%eax\n\t"
2132 "sete %cl\n\t"
2133 "xor %ebx,%ebx\n\t"
2134 "movzbl %cl,%eax");
2135 }
2136
2137 static void
2138 i386_emit_bit_and (void)
2139 {
2140 EMIT_ASM32 (i386_and,
2141 "and (%esp),%eax\n\t"
2142 "and 0x4(%esp),%ebx\n\t"
2143 "lea 0x8(%esp),%esp");
2144 }
2145
2146 static void
2147 i386_emit_bit_or (void)
2148 {
2149 EMIT_ASM32 (i386_or,
2150 "or (%esp),%eax\n\t"
2151 "or 0x4(%esp),%ebx\n\t"
2152 "lea 0x8(%esp),%esp");
2153 }
2154
2155 static void
2156 i386_emit_bit_xor (void)
2157 {
2158 EMIT_ASM32 (i386_xor,
2159 "xor (%esp),%eax\n\t"
2160 "xor 0x4(%esp),%ebx\n\t"
2161 "lea 0x8(%esp),%esp");
2162 }
2163
2164 static void
2165 i386_emit_bit_not (void)
2166 {
2167 EMIT_ASM32 (i386_bit_not,
2168 "xor $0xffffffff,%eax\n\t"
2169 "xor $0xffffffff,%ebx\n\t");
2170 }

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump;
                 emit the long form of JNE explicitly, with a zeroed
                 32-bit displacement to be patched later.  */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

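  /* The sequence above assembles to (standard encodings):
	0:  89 c1              mov  %eax,%ecx
	2:  09 d9              or   %ebx,%ecx
	4:  58                 pop  %eax
	5:  5b                 pop  %ebx
	6:  83 f9 00           cmpl $0x0,%ecx
	9:  0f 85 xx xx xx xx  jne  <disp32>
     so the 4-byte displacement that i386_write_goto_address patches
     begins at offset 11.  */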
  if (offset_p)
    *offset_p = 11; /* Be sure that this matches the sequence above.  */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump;
                 emit JMP rel32 explicitly.  */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1; /* The displacement follows the one opcode byte.  */
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
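
/* A worked example of the displacement math above: x86 branches are
   relative to the end of the instruction, i.e. dest = from + size + disp,
   hence disp = to - (from + size).  A displacement field at 0x1000 that
   should send control to 0x1010 gets 0x1010 - (0x1000 + 4) = 0xc.  */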

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
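
/* For example, emitting the constant 0x200000001 appends the bytes

	b8 01 00 00 00		mov $0x1,%eax
	bb 02 00 00 00		mov $0x2,%ebx

   while a constant whose high word is zero takes the shorter form

	b8 NN NN NN NN		mov $<n>,%eax
	31 db			xor %ebx,%ebx  */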

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
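
/* The E8 opcode takes a 32-bit displacement relative to the
   instruction that follows the call, hence the `(buildaddr + 5)'
   (one opcode byte plus four displacement bytes).  On i386 every
   code address fits in an int, so the `(int) fn' cast is lossless.  */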

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Make space for the two arguments of the helper call below.  */
  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  /* Pass the register number and the raw regs base (saved at 8(%ebp)
     by the prologue) as the helper's arguments.  */
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
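
/* Each entry of the agent-expression stack occupies two 32-bit words,
   so discarding N entries advances %esp by N * 8.  Note that the
   `8d 64 24 NN' form emitted above is LEA with an 8-bit signed
   displacement, so it only reaches adjustments with N * 8 <= 127.  */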

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
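
/* The callee returns a LONGEST; under the i386 calling convention a
   64-bit result comes back in %edx:%eax, so the `mov %edx,%ebx' above
   re-forms the %ebx:%eax top-of-stack pair from it.  */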

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  /* Register 0 is the first GPR (RAX/EAX); it is 8 bytes wide only
     when the inferior is 64-bit.  */
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};