/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

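/* A near jump: opcode 0xe9 followed by a 32-bit relative displacement,
   which is patched in before these bytes are actually used.  */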
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

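    /* The second word of a struct user_desc is the segment's base
       address, which is what libthread_db is after.  */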
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

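    /* A segment selector is (index << 3 | TI | RPL); shifting right by
       three strips the table-indicator and privilege bits, leaving the
       GDT entry number for PTRACE_GET_THREAD_AREA.  */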
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
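/* The INT3 software-breakpoint instruction is the single byte 0xCC.  */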
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved by some other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum < DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
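
/* The _pad member pads the union out to the kernel's 128-byte siginfo
   size (minus the three leading ints), keeping this layout compatible
   with the kernel's 32-bit siginfo_t.  */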

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, copy from
   INF to NATIVE; if DIRECTION is 0, copy from NATIVE to INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

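      /* The kernel records XCR0 in the software-reserved bytes of the
         FXSAVE region (fpx_sw_bytes, at byte offset 464), so no extra
         ptrace request is needed to read it.  */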
      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume gdb doesn't support XML unless it sends "xmlRegisters="
     with "i386" in its qSupported query, which indicates that it
     supports x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

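/* Parse OP, a string of space-separated hex byte values such as
   "48 89 e6", appending the bytes to BUF; returns the number of bytes
   written.  Used below to keep the jump-pad emitters readable.  */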
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad to
   JJUMP_PAD_INSN; the caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad to
   JJUMP_PAD_INSN; the caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which the original i386 lacks;
     if we cared about that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
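
/* For example, EMIT_ASM (my_nop, "nop") would assemble a single NOP
   between the start_my_nop and end_my_nop labels and copy that byte
   into the inferior at current_insn_ptr.  (Illustrative only; no such
   emitter exists below.)  */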

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use r10; since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x49; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0x41; /* callq *%r10 */
      buf[i++] = 0xff;
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 15 slots (the displacement is
     a single signed byte), but we don't expect any more.  */
1939 buf[i++] = n * 8;
1940 append_insns (&buildaddr, i, buf);
1941 current_insn_ptr = buildaddr;
1942 }
1943
1944 /* FN's prototype is `LONGEST(*fn)(int)'. */
1945
1946 static void
1947 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1948 {
1949 unsigned char buf[16];
1950 int i;
1951 CORE_ADDR buildaddr;
1952
1953 buildaddr = current_insn_ptr;
1954 i = 0;
1955 buf[i++] = 0xbf; /* movl $<n>,%edi */
1956 *((int *) (&buf[i])) = arg1;
1957 i += 4;
1958 append_insns (&buildaddr, i, buf);
1959 current_insn_ptr = buildaddr;
1960 amd64_emit_call (fn);
1961 }
1962
1963 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1964
1965 static void
1966 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1967 {
1968 unsigned char buf[16];
1969 int i;
1970 CORE_ADDR buildaddr;
1971
1972 buildaddr = current_insn_ptr;
1973 i = 0;
1974 buf[i++] = 0xbf; /* movl $<n>,%edi */
1975 *((int *) (&buf[i])) = arg1;
1976 i += 4;
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979 EMIT_ASM (amd64_void_call_2_a,
1980 /* Save away a copy of the stack top. */
1981 "push %rax\n\t"
1982 /* Also pass top as the second argument. */
1983 "mov %rax,%rsi");
1984 amd64_emit_call (fn);
1985 EMIT_ASM (amd64_void_call_2_b,
1986 /* Restore the stack top, %rax may have been trashed. */
1987 "pop %rax");
1988 }
1989
1990 struct emit_ops amd64_emit_ops =
1991 {
1992 amd64_emit_prologue,
1993 amd64_emit_epilogue,
1994 amd64_emit_add,
1995 amd64_emit_sub,
1996 amd64_emit_mul,
1997 amd64_emit_lsh,
1998 amd64_emit_rsh_signed,
1999 amd64_emit_rsh_unsigned,
2000 amd64_emit_ext,
2001 amd64_emit_log_not,
2002 amd64_emit_bit_and,
2003 amd64_emit_bit_or,
2004 amd64_emit_bit_xor,
2005 amd64_emit_bit_not,
2006 amd64_emit_equal,
2007 amd64_emit_less_signed,
2008 amd64_emit_less_unsigned,
2009 amd64_emit_ref,
2010 amd64_emit_if_goto,
2011 amd64_emit_goto,
2012 amd64_write_goto_address,
2013 amd64_emit_const,
2014 amd64_emit_call,
2015 amd64_emit_reg,
2016 amd64_emit_pop,
2017 amd64_emit_stack_flush,
2018 amd64_emit_zero_ext,
2019 amd64_emit_swap,
2020 amd64_emit_stack_adjust,
2021 amd64_emit_int_call_1,
2022 amd64_emit_void_call_2
2023 };
2024
2025 #endif /* __x86_64__ */
2026
2027 static void
2028 i386_emit_prologue (void)
2029 {
2030 EMIT_ASM32 (i386_prologue,
2031 "push %ebp\n\t"
2032 "mov %esp,%ebp");
2033 /* At this point, the raw regs base address is at 8(%ebp), and the
2034 value pointer is at 12(%ebp). */
2035 }
2036
2037 static void
2038 i386_emit_epilogue (void)
2039 {
2040 EMIT_ASM32 (i386_epilogue,
2041 "mov 12(%ebp),%ecx\n\t"
2042 "mov %eax,(%ecx)\n\t"
2043 "mov %ebx,0x4(%ecx)\n\t"
2044 "xor %eax,%eax\n\t"
2045 "pop %ebp\n\t"
2046 "ret");
2047 }
2048
2049 static void
2050 i386_emit_add (void)
2051 {
2052 EMIT_ASM32 (i386_add,
2053 "add (%esp),%eax\n\t"
2054 "adc 0x4(%esp),%ebx\n\t"
2055 "lea 0x8(%esp),%esp");
2056 }
2057
2058 static void
2059 i386_emit_sub (void)
2060 {
2061 EMIT_ASM32 (i386_sub,
2062 "subl %eax,(%esp)\n\t"
2063 "sbbl %ebx,4(%esp)\n\t"
2064 "pop %eax\n\t"
2065 "pop %ebx\n\t");
2066 }
2067
2068 static void
2069 i386_emit_mul (void)
2070 {
2071 emit_error = 1;
2072 }
2073
2074 static void
2075 i386_emit_lsh (void)
2076 {
2077 emit_error = 1;
2078 }
2079
2080 static void
2081 i386_emit_rsh_signed (void)
2082 {
2083 emit_error = 1;
2084 }
2085
2086 static void
2087 i386_emit_rsh_unsigned (void)
2088 {
2089 emit_error = 1;
2090 }
2091
2092 static void
2093 i386_emit_ext (int arg)
2094 {
2095 switch (arg)
2096 {
2097 case 8:
2098 EMIT_ASM32 (i386_ext_8,
2099 "cbtw\n\t"
2100 "cwtl\n\t"
2101 "movl %eax,%ebx\n\t"
2102 "sarl $31,%ebx");
2103 break;
2104 case 16:
2105 EMIT_ASM32 (i386_ext_16,
2106 "cwtl\n\t"
2107 "movl %eax,%ebx\n\t"
2108 "sarl $31,%ebx");
2109 break;
2110 case 32:
2111 EMIT_ASM32 (i386_ext_32,
2112 "movl %eax,%ebx\n\t"
2113 "sarl $31,%ebx");
2114 break;
2115 default:
2116 emit_error = 1;
2117 }
2118 }
2119
2120 static void
2121 i386_emit_log_not (void)
2122 {
2123 EMIT_ASM32 (i386_log_not,
2124 "or %ebx,%eax\n\t"
2125 "test %eax,%eax\n\t"
2126 "sete %cl\n\t"
2127 "xor %ebx,%ebx\n\t"
2128 "movzbl %cl,%eax");
2129 }
2130
2131 static void
2132 i386_emit_bit_and (void)
2133 {
2134 EMIT_ASM32 (i386_and,
2135 "and (%esp),%eax\n\t"
2136 "and 0x4(%esp),%ebx\n\t"
2137 "lea 0x8(%esp),%esp");
2138 }
2139
2140 static void
2141 i386_emit_bit_or (void)
2142 {
2143 EMIT_ASM32 (i386_or,
2144 "or (%esp),%eax\n\t"
2145 "or 0x4(%esp),%ebx\n\t"
2146 "lea 0x8(%esp),%esp");
2147 }
2148
2149 static void
2150 i386_emit_bit_xor (void)
2151 {
2152 EMIT_ASM32 (i386_xor,
2153 "xor (%esp),%eax\n\t"
2154 "xor 0x4(%esp),%ebx\n\t"
2155 "lea 0x8(%esp),%esp");
2156 }
2157
2158 static void
2159 i386_emit_bit_not (void)
2160 {
2161 EMIT_ASM32 (i386_bit_not,
2162 "xor $0xffffffff,%eax\n\t"
2163 "xor $0xffffffff,%ebx\n\t");
2164 }
2165
static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The high words were equal, so the low words decide,
		 and they compare as unsigned quantities.  */
	      "jb .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

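/* All three comparisons above share the same double-word pattern;
   roughly, in C (names illustrative only, with A the deeper stack
   entry and B the top of stack):

     if (a_hi != b_hi)
       result = compare (a_hi, b_hi);   signed or unsigned high words
     else
       result = (unsigned) a_lo < (unsigned) b_lo;

   The high words decide unless they are equal, in which case the
   low words compare as unsigned quantities even for the signed
   variant.  */
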
static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      /* Fetch the high word first; the second move clobbers the
	 address in %eax.  */
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump.  */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* Offset of the jne displacement in the
		       sequence above; see the byte layout below.  */
  if (size_p)
    *size_p = 4;
}

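/* Byte layout of the i386_if_goto sequence, as GAS typically
   assembles it (shown for reference; the hard-coded offset above
   must match):

     0:  89 c1              mov  %eax,%ecx
     2:  09 d9              or   %ebx,%ecx
     4:  58                 pop  %eax
     5:  5b                 pop  %ebx
     6:  83 f9 00           cmpl $0x0,%ecx
     9:  0f 85 xx xx xx xx  jne  rel32

   so the 4-byte displacement begins at offset 11, which is what
   *offset_p reports and what i386_write_goto_address patches.  */
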
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump.  */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

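/* Worked example of the displacement computation above: with the
   4-byte field at FROM = 0x1000 and a branch target TO = 0x1080,
   the stored value is 0x1080 - (0x1000 + 4) = 0x7c, since x86
   relative jumps are taken from the end of the displacement.  */
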
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

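/* Example of the bytes i386_emit_const produces for
   num = 0x1122334455667788 (immediates are little-endian):

     b8 88 77 66 55   mov $0x55667788,%eax   low half
     bb 44 33 22 11   mov $0x11223344,%ebx   high half

   For a constant whose high 32 bits are zero, the second mov is
   replaced by the two-byte `xor %ebx,%ebx'.  */
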
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

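/* Example: a call emitted at buildaddr = 0x2000 to fn = 0x3000
   encodes its displacement relative to the end of the 5-byte
   instruction, 0x3000 - 0x2005 = 0xffb, giving

     e8 fb 0f 00 00   call 0x3000  */
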
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  /* N counts 8-byte stack slots (32-bit pairs), and N * 8 must fit
     in the signed 8-bit displacement used here.  */
  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

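/* Example: i386_emit_stack_adjust (2) drops two 8-byte stack slots
   by emitting

     8d 64 24 10   lea 0x10(%esp),%esp

   Note the single displacement byte, which is where the signed-char
   limit on n * 8 comes from.  */
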
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      /* The 64-bit result comes back in %edx:%eax; move the
		 high word into %ebx to match our stack convention.  */
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };

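/* For illustration, a front end driving this vtable might lower the
   agent expression `1 + 2' along these lines (hypothetical caller
   code; in gdbserver the real driver lives in the tracepoint code):

     ops->emit_prologue ();
     ops->emit_const (1);          top of stack is now 1
     ops->emit_stack_flush ();     spill it to the memory stack
     ops->emit_const (2);          top of stack is now 2
     ops->emit_add ();             2 + 1; the memory slot is popped
     ops->emit_epilogue ();        store the result, return

   The flush before the second constant is what keeps the
   register-cached top of stack consistent.  */
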
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  /* Register 0 is 8 bytes wide (%rax) in a 64-bit inferior and 4
     bytes wide (%eax) in a 32-bit one, so its size tells us which
     set of emitters to use.  */
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if the host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};