Add MPX support to gdbserver.
gdb/gdbserver/linux-x86-low.c
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include <stddef.h>
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "server.h"
25 #include "linux-low.h"
26 #include "i387-fp.h"
27 #include "i386-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
30
31 #include "gdb_proc_service.h"
32 #include "agent.h"
33 #include "tdesc.h"
34 #include "tracepoint.h"
35 #include "ax.h"
36
37 #ifdef __x86_64__
38 /* Defined in auto-generated file amd64-linux.c. */
39 void init_registers_amd64_linux (void);
40 extern const struct target_desc *tdesc_amd64_linux;
41
42 /* Defined in auto-generated file amd64-avx-linux.c. */
43 void init_registers_amd64_avx_linux (void);
44 extern const struct target_desc *tdesc_amd64_avx_linux;
45
46 /* Defined in auto-generated file amd64-mpx-linux.c. */
47 void init_registers_amd64_mpx_linux (void);
48 extern const struct target_desc *tdesc_amd64_mpx_linux;
49
50 /* Defined in auto-generated file x32-linux.c. */
51 void init_registers_x32_linux (void);
52 extern const struct target_desc *tdesc_x32_linux;
53
54 /* Defined in auto-generated file x32-avx-linux.c. */
55 void init_registers_x32_avx_linux (void);
56 extern const struct target_desc *tdesc_x32_avx_linux;
57
58 #endif
59
60 /* Defined in auto-generated file i386-linux.c. */
61 void init_registers_i386_linux (void);
62 extern const struct target_desc *tdesc_i386_linux;
63
64 /* Defined in auto-generated file i386-mmx-linux.c. */
65 void init_registers_i386_mmx_linux (void);
66 extern const struct target_desc *tdesc_i386_mmx_linux;
67
68 /* Defined in auto-generated file i386-avx-linux.c. */
69 void init_registers_i386_avx_linux (void);
70 extern const struct target_desc *tdesc_i386_avx_linux;
71
72 /* Defined in auto-generated file i386-mpx-linux.c. */
73 void init_registers_i386_mpx_linux (void);
74 extern const struct target_desc *tdesc_i386_mpx_linux;
75
76 #ifdef __x86_64__
77 static struct target_desc *tdesc_amd64_linux_no_xml;
78 #endif
79 static struct target_desc *tdesc_i386_linux_no_xml;
80
81
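/* Templates for the jump instructions used to wire in fast
   tracepoints: a 5-byte "jmp rel32" (opcode 0xe9) and, for 4-byte
   instructions when a trampoline is available, a 4-byte "jmp rel16"
   (0x66 0xe9, an operand-size-prefixed jump with a 16-bit
   displacement).  The zero bytes are patched with the actual
   displacement before use.  */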
82 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
83 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
84
85 /* Backward compatibility for gdb without XML support. */
86
87 static const char *xmltarget_i386_linux_no_xml = "@<target>\
88 <architecture>i386</architecture>\
89 <osabi>GNU/Linux</osabi>\
90 </target>";
91
92 #ifdef __x86_64__
93 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
94 <architecture>i386:x86-64</architecture>\
95 <osabi>GNU/Linux</osabi>\
96 </target>";
97 #endif
98
99 #include <sys/reg.h>
100 #include <sys/procfs.h>
101 #include <sys/ptrace.h>
102 #include <sys/uio.h>
103
104 #ifndef PTRACE_GETREGSET
105 #define PTRACE_GETREGSET 0x4204
106 #endif
107
108 #ifndef PTRACE_SETREGSET
109 #define PTRACE_SETREGSET 0x4205
110 #endif
111
112
113 #ifndef PTRACE_GET_THREAD_AREA
114 #define PTRACE_GET_THREAD_AREA 25
115 #endif
116
117 /* This definition comes from prctl.h, but some kernels may not have it. */
118 #ifndef PTRACE_ARCH_PRCTL
119 #define PTRACE_ARCH_PRCTL 30
120 #endif
121
122 /* The following definitions come from prctl.h, but may be absent
123 for certain configurations. */
124 #ifndef ARCH_GET_FS
125 #define ARCH_SET_GS 0x1001
126 #define ARCH_SET_FS 0x1002
127 #define ARCH_GET_FS 0x1003
128 #define ARCH_GET_GS 0x1004
129 #endif
130
131 /* Per-process arch-specific data we want to keep. */
132
133 struct arch_process_info
134 {
135 struct i386_debug_reg_state debug_reg_state;
136 };
137
138 /* Per-thread arch-specific data we want to keep. */
139
140 struct arch_lwp_info
141 {
142 /* Non-zero if our copy differs from what's recorded in the thread. */
143 int debug_registers_changed;
144 };
145
146 #ifdef __x86_64__
147
148 /* Mapping between the general-purpose registers in `struct user'
149 format and GDB's register array layout.
150 Note that the transfer layout uses 64-bit regs. */
151 static /*const*/ int i386_regmap[] =
152 {
153 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
154 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
155 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
156 DS * 8, ES * 8, FS * 8, GS * 8
157 };
158
159 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
160
161 /* So code below doesn't have to care, i386 or amd64. */
162 #define ORIG_EAX ORIG_RAX
163
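/* Entries of -1 below mark registers that cannot be fetched or stored
   with PTRACE_PEEKUSER/PTRACE_POKEUSER; they are transferred through
   the regsets instead (in particular, the floating-point, SSE/AVX and
   the new MPX registers come from the PTRACE_GETREGSET/XSAVE data).  */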
164 static const int x86_64_regmap[] =
165 {
166 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
167 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
168 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
169 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
170 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
171 DS * 8, ES * 8, FS * 8, GS * 8,
172 -1, -1, -1, -1, -1, -1, -1, -1,
173 -1, -1, -1, -1, -1, -1, -1, -1,
174 -1, -1, -1, -1, -1, -1, -1, -1,
175 -1,
176 -1, -1, -1, -1, -1, -1, -1, -1,
177 ORIG_RAX * 8,
178 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
179 -1, -1 /* MPX registers BNDCFGU, BNDSTATUS. */
180 };
181
182 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
183
184 #else /* ! __x86_64__ */
185
186 /* Mapping between the general-purpose registers in `struct user'
187 format and GDB's register array layout. */
188 static /*const*/ int i386_regmap[] =
189 {
190 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
191 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
192 EIP * 4, EFL * 4, CS * 4, SS * 4,
193 DS * 4, ES * 4, FS * 4, GS * 4
194 };
195
196 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
197
198 #endif
199
200 #ifdef __x86_64__
201
202 /* Returns true if the current inferior belongs to an x86-64 process,
203 per the tdesc. */
204
205 static int
206 is_64bit_tdesc (void)
207 {
208 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
209
210 return register_size (regcache->tdesc, 0) == 8;
211 }
212
213 #endif
214
215 \f
216 /* Called by libthread_db. */
217
218 ps_err_e
219 ps_get_thread_area (const struct ps_prochandle *ph,
220 lwpid_t lwpid, int idx, void **base)
221 {
222 #ifdef __x86_64__
223 int use_64bit = is_64bit_tdesc ();
224
225 if (use_64bit)
226 {
227 switch (idx)
228 {
229 case FS:
230 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
231 return PS_OK;
232 break;
233 case GS:
234 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
235 return PS_OK;
236 break;
237 default:
238 return PS_BADADDR;
239 }
240 return PS_ERR;
241 }
242 #endif
243
244 {
245 unsigned int desc[4];
246
247 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
248 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
249 return PS_ERR;
250
251 /* Ensure we properly extend the value to 64-bits for x86_64. */
252 *base = (void *) (uintptr_t) desc[1];
253 return PS_OK;
254 }
255 }
256
257 /* Get the thread area address. This is used to recognize which
258 thread is which when tracing with the in-process agent library. We
259 don't read anything from the address, and treat it as opaque; it's
260 the address itself that we assume is unique per-thread. */
261
262 static int
263 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
264 {
265 #ifdef __x86_64__
266 int use_64bit = is_64bit_tdesc ();
267
268 if (use_64bit)
269 {
270 void *base;
271 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
272 {
273 *addr = (CORE_ADDR) (uintptr_t) base;
274 return 0;
275 }
276
277 return -1;
278 }
279 #endif
280
281 {
282 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
283 struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
284 unsigned int desc[4];
285 ULONGEST gs = 0;
286 const int reg_thread_area = 3; /* GS selector: shift off the RPL/TI bits to get the GDT index. */
287 int idx;
288
289 collect_register_by_name (regcache, "gs", &gs);
290
291 idx = gs >> reg_thread_area;
292
293 if (ptrace (PTRACE_GET_THREAD_AREA,
294 lwpid_of (lwp),
295 (void *) (long) idx, (unsigned long) &desc) < 0)
296 return -1;
297
298 *addr = desc[1];
299 return 0;
300 }
301 }
302
303
304 \f
305 static int
306 x86_cannot_store_register (int regno)
307 {
308 #ifdef __x86_64__
309 if (is_64bit_tdesc ())
310 return 0;
311 #endif
312
313 return regno >= I386_NUM_REGS;
314 }
315
316 static int
317 x86_cannot_fetch_register (int regno)
318 {
319 #ifdef __x86_64__
320 if (is_64bit_tdesc ())
321 return 0;
322 #endif
323
324 return regno >= I386_NUM_REGS;
325 }
326
327 static void
328 x86_fill_gregset (struct regcache *regcache, void *buf)
329 {
330 int i;
331
332 #ifdef __x86_64__
333 if (register_size (regcache->tdesc, 0) == 8)
334 {
335 for (i = 0; i < X86_64_NUM_REGS; i++)
336 if (x86_64_regmap[i] != -1)
337 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
338 return;
339 }
340 #endif
341
342 for (i = 0; i < I386_NUM_REGS; i++)
343 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
344
345 collect_register_by_name (regcache, "orig_eax",
346 ((char *) buf) + ORIG_EAX * 4);
347 }
348
349 static void
350 x86_store_gregset (struct regcache *regcache, const void *buf)
351 {
352 int i;
353
354 #ifdef __x86_64__
355 if (register_size (regcache->tdesc, 0) == 8)
356 {
357 for (i = 0; i < X86_64_NUM_REGS; i++)
358 if (x86_64_regmap[i] != -1)
359 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
360 return;
361 }
362 #endif
363
364 for (i = 0; i < I386_NUM_REGS; i++)
365 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
366
367 supply_register_by_name (regcache, "orig_eax",
368 ((char *) buf) + ORIG_EAX * 4);
369 }
370
371 static void
372 x86_fill_fpregset (struct regcache *regcache, void *buf)
373 {
374 #ifdef __x86_64__
375 i387_cache_to_fxsave (regcache, buf);
376 #else
377 i387_cache_to_fsave (regcache, buf);
378 #endif
379 }
380
381 static void
382 x86_store_fpregset (struct regcache *regcache, const void *buf)
383 {
384 #ifdef __x86_64__
385 i387_fxsave_to_cache (regcache, buf);
386 #else
387 i387_fsave_to_cache (regcache, buf);
388 #endif
389 }
390
391 #ifndef __x86_64__
392
393 static void
394 x86_fill_fpxregset (struct regcache *regcache, void *buf)
395 {
396 i387_cache_to_fxsave (regcache, buf);
397 }
398
399 static void
400 x86_store_fpxregset (struct regcache *regcache, const void *buf)
401 {
402 i387_fxsave_to_cache (regcache, buf);
403 }
404
405 #endif
406
407 static void
408 x86_fill_xstateregset (struct regcache *regcache, void *buf)
409 {
410 i387_cache_to_xsave (regcache, buf);
411 }
412
413 static void
414 x86_store_xstateregset (struct regcache *regcache, const void *buf)
415 {
416 i387_xsave_to_cache (regcache, buf);
417 }
418
419 /* ??? The non-biarch i386 case stores all the i387 regs twice.
420 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
421 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
422 doesn't work. It would be nice to avoid the duplication when it
423 does work. Maybe the arch_setup routine could check whether it works
424 and update the supported regsets accordingly. */
425
426 static struct regset_info x86_regsets[] =
427 {
428 #ifdef HAVE_PTRACE_GETREGS
429 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
430 GENERAL_REGS,
431 x86_fill_gregset, x86_store_gregset },
432 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
433 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
434 # ifndef __x86_64__
435 # ifdef HAVE_PTRACE_GETFPXREGS
436 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
437 EXTENDED_REGS,
438 x86_fill_fpxregset, x86_store_fpxregset },
439 # endif
440 # endif
441 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
442 FP_REGS,
443 x86_fill_fpregset, x86_store_fpregset },
444 #endif /* HAVE_PTRACE_GETREGS */
445 { 0, 0, 0, -1, -1, NULL, NULL }
446 };
447
448 static CORE_ADDR
449 x86_get_pc (struct regcache *regcache)
450 {
451 int use_64bit = register_size (regcache->tdesc, 0) == 8;
452
453 if (use_64bit)
454 {
455 unsigned long pc;
456 collect_register_by_name (regcache, "rip", &pc);
457 return (CORE_ADDR) pc;
458 }
459 else
460 {
461 unsigned int pc;
462 collect_register_by_name (regcache, "eip", &pc);
463 return (CORE_ADDR) pc;
464 }
465 }
466
467 static void
468 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
469 {
470 int use_64bit = register_size (regcache->tdesc, 0) == 8;
471
472 if (use_64bit)
473 {
474 unsigned long newpc = pc;
475 supply_register_by_name (regcache, "rip", &newpc);
476 }
477 else
478 {
479 unsigned int newpc = pc;
480 supply_register_by_name (regcache, "eip", &newpc);
481 }
482 }
483 \f
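/* The software breakpoint instruction: 0xCC is the one-byte "int3"
   breakpoint trap.  */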
484 static const unsigned char x86_breakpoint[] = { 0xCC };
485 #define x86_breakpoint_len 1
486
487 static int
488 x86_breakpoint_at (CORE_ADDR pc)
489 {
490 unsigned char c;
491
492 (*the_target->read_memory) (pc, &c, 1);
493 if (c == 0xCC)
494 return 1;
495
496 return 0;
497 }
498 \f
499 /* Support for debug registers. */
500
501 static unsigned long
502 x86_linux_dr_get (ptid_t ptid, int regnum)
503 {
504 int tid;
505 unsigned long value;
506
507 tid = ptid_get_lwp (ptid);
508
509 errno = 0;
510 value = ptrace (PTRACE_PEEKUSER, tid,
511 offsetof (struct user, u_debugreg[regnum]), 0);
512 if (errno != 0)
513 error ("Couldn't read debug register");
514
515 return value;
516 }
517
518 static void
519 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
520 {
521 int tid;
522
523 tid = ptid_get_lwp (ptid);
524
525 errno = 0;
526 ptrace (PTRACE_POKEUSER, tid,
527 offsetof (struct user, u_debugreg[regnum]), value);
528 if (errno != 0)
529 error ("Couldn't write debug register");
530 }
531
532 static int
533 update_debug_registers_callback (struct inferior_list_entry *entry,
534 void *pid_p)
535 {
536 struct lwp_info *lwp = (struct lwp_info *) entry;
537 int pid = *(int *) pid_p;
538
539 /* Only update the threads of this process. */
540 if (pid_of (lwp) == pid)
541 {
542 /* The actual update is done later just before resuming the lwp,
543 we just mark that the registers need updating. */
544 lwp->arch_private->debug_registers_changed = 1;
545
546 /* If the lwp isn't stopped, force it to momentarily pause, so
547 we can update its debug registers. */
548 if (!lwp->stopped)
549 linux_stop_lwp (lwp);
550 }
551
552 return 0;
553 }
554
555 /* Update the inferior's debug register REGNUM from STATE. */
556
557 void
558 i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
559 {
560 /* Only update the threads of this process. */
561 int pid = pid_of (get_thread_lwp (current_inferior));
562
563 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
564 fatal ("Invalid debug register %d", regnum);
565
566 find_inferior (&all_lwps, update_debug_registers_callback, &pid);
567 }
568
569 /* Return the inferior's debug register REGNUM. */
570
571 CORE_ADDR
572 i386_dr_low_get_addr (int regnum)
573 {
574 struct lwp_info *lwp = get_thread_lwp (current_inferior);
575 ptid_t ptid = ptid_of (lwp);
576
577 /* DR6 and DR7 are retrieved some other way. */
578 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
579
580 return x86_linux_dr_get (ptid, regnum);
581 }
582
583 /* Update the inferior's DR7 debug control register from STATE. */
584
585 void
586 i386_dr_low_set_control (const struct i386_debug_reg_state *state)
587 {
588 /* Only update the threads of this process. */
589 int pid = pid_of (get_thread_lwp (current_inferior));
590
591 find_inferior (&all_lwps, update_debug_registers_callback, &pid);
592 }
593
594 /* Return the inferior's DR7 debug control register. */
595
596 unsigned
597 i386_dr_low_get_control (void)
598 {
599 struct lwp_info *lwp = get_thread_lwp (current_inferior);
600 ptid_t ptid = ptid_of (lwp);
601
602 return x86_linux_dr_get (ptid, DR_CONTROL);
603 }
604
605 /* Get the value of the DR6 debug status register from the inferior
606 and record it in STATE. */
607
608 unsigned
609 i386_dr_low_get_status (void)
610 {
611 struct lwp_info *lwp = get_thread_lwp (current_inferior);
612 ptid_t ptid = ptid_of (lwp);
613
614 return x86_linux_dr_get (ptid, DR_STATUS);
615 }
616 \f
617 /* Breakpoint/Watchpoint support. */
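/* The TYPE argument below is the code GDB uses in its Z/z packets:
   '0' = software breakpoint, '1' = hardware breakpoint, '2' = write
   watchpoint, '3' = read watchpoint, '4' = access watchpoint.  */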
618
619 static int
620 x86_insert_point (char type, CORE_ADDR addr, int len)
621 {
622 struct process_info *proc = current_process ();
623 switch (type)
624 {
625 case '0': /* software-breakpoint */
626 {
627 int ret;
628
629 ret = prepare_to_access_memory ();
630 if (ret)
631 return -1;
632 ret = set_gdb_breakpoint_at (addr);
633 done_accessing_memory ();
634 return ret;
635 }
636 case '1': /* hardware-breakpoint */
637 case '2': /* write watchpoint */
638 case '3': /* read watchpoint */
639 case '4': /* access watchpoint */
640 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
641 type, addr, len);
642
643 default:
644 /* Unsupported. */
645 return 1;
646 }
647 }
648
649 static int
650 x86_remove_point (char type, CORE_ADDR addr, int len)
651 {
652 struct process_info *proc = current_process ();
653 switch (type)
654 {
655 case '0': /* software-breakpoint */
656 {
657 int ret;
658
659 ret = prepare_to_access_memory ();
660 if (ret)
661 return -1;
662 ret = delete_gdb_breakpoint_at (addr);
663 done_accessing_memory ();
664 return ret;
665 }
666 case '1': /* hardware-breakpoint */
667 case '2': /* write watchpoint */
668 case '3': /* read watchpoint */
669 case '4': /* access watchpoint */
670 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
671 type, addr, len);
672 default:
673 /* Unsupported. */
674 return 1;
675 }
676 }
677
678 static int
679 x86_stopped_by_watchpoint (void)
680 {
681 struct process_info *proc = current_process ();
682 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
683 }
684
685 static CORE_ADDR
686 x86_stopped_data_address (void)
687 {
688 struct process_info *proc = current_process ();
689 CORE_ADDR addr;
690 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
691 &addr))
692 return addr;
693 return 0;
694 }
695 \f
696 /* Called when a new process is created. */
697
698 static struct arch_process_info *
699 x86_linux_new_process (void)
700 {
701 struct arch_process_info *info = xcalloc (1, sizeof (*info));
702
703 i386_low_init_dregs (&info->debug_reg_state);
704
705 return info;
706 }
707
708 /* Called when a new thread is detected. */
709
710 static struct arch_lwp_info *
711 x86_linux_new_thread (void)
712 {
713 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
714
715 info->debug_registers_changed = 1;
716
717 return info;
718 }
719
720 /* Called when resuming a thread.
721 If the debug regs have changed, update the thread's copies. */
722
723 static void
724 x86_linux_prepare_to_resume (struct lwp_info *lwp)
725 {
726 ptid_t ptid = ptid_of (lwp);
727 int clear_status = 0;
728
729 if (lwp->arch_private->debug_registers_changed)
730 {
731 int i;
732 int pid = ptid_get_pid (ptid);
733 struct process_info *proc = find_process_pid (pid);
734 struct i386_debug_reg_state *state
735 = &proc->private->arch_private->debug_reg_state;
736
737 for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
738 if (state->dr_ref_count[i] > 0)
739 {
740 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
741
742 /* If we're setting a watchpoint, any change the inferior
743 had done itself to the debug registers needs to be
744 discarded, otherwise, i386_low_stopped_data_address can
745 get confused. */
746 clear_status = 1;
747 }
748
749 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
750
751 lwp->arch_private->debug_registers_changed = 0;
752 }
753
754 if (clear_status || lwp->stopped_by_watchpoint)
755 x86_linux_dr_set (ptid, DR_STATUS, 0);
756 }
757 \f
758 /* When GDBSERVER is built as a 64-bit application on linux, the
759 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
760 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
761 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
762 conversion in-place ourselves. */
763
764 /* These types below (compat_*) define a siginfo type that is layout
765 compatible with the siginfo type exported by the 32-bit userspace
766 support. */
767
768 #ifdef __x86_64__
769
770 typedef int compat_int_t;
771 typedef unsigned int compat_uptr_t;
772
773 typedef int compat_time_t;
774 typedef int compat_timer_t;
775 typedef int compat_clock_t;
776
777 struct compat_timeval
778 {
779 compat_time_t tv_sec;
780 int tv_usec;
781 };
782
783 typedef union compat_sigval
784 {
785 compat_int_t sival_int;
786 compat_uptr_t sival_ptr;
787 } compat_sigval_t;
788
789 typedef struct compat_siginfo
790 {
791 int si_signo;
792 int si_errno;
793 int si_code;
794
795 union
796 {
797 int _pad[((128 / sizeof (int)) - 3)];
798
799 /* kill() */
800 struct
801 {
802 unsigned int _pid;
803 unsigned int _uid;
804 } _kill;
805
806 /* POSIX.1b timers */
807 struct
808 {
809 compat_timer_t _tid;
810 int _overrun;
811 compat_sigval_t _sigval;
812 } _timer;
813
814 /* POSIX.1b signals */
815 struct
816 {
817 unsigned int _pid;
818 unsigned int _uid;
819 compat_sigval_t _sigval;
820 } _rt;
821
822 /* SIGCHLD */
823 struct
824 {
825 unsigned int _pid;
826 unsigned int _uid;
827 int _status;
828 compat_clock_t _utime;
829 compat_clock_t _stime;
830 } _sigchld;
831
832 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
833 struct
834 {
835 unsigned int _addr;
836 } _sigfault;
837
838 /* SIGPOLL */
839 struct
840 {
841 int _band;
842 int _fd;
843 } _sigpoll;
844 } _sifields;
845 } compat_siginfo_t;
846
847 /* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes. */
848 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
849
850 typedef struct compat_x32_siginfo
851 {
852 int si_signo;
853 int si_errno;
854 int si_code;
855
856 union
857 {
858 int _pad[((128 / sizeof (int)) - 3)];
859
860 /* kill() */
861 struct
862 {
863 unsigned int _pid;
864 unsigned int _uid;
865 } _kill;
866
867 /* POSIX.1b timers */
868 struct
869 {
870 compat_timer_t _tid;
871 int _overrun;
872 compat_sigval_t _sigval;
873 } _timer;
874
875 /* POSIX.1b signals */
876 struct
877 {
878 unsigned int _pid;
879 unsigned int _uid;
880 compat_sigval_t _sigval;
881 } _rt;
882
883 /* SIGCHLD */
884 struct
885 {
886 unsigned int _pid;
887 unsigned int _uid;
888 int _status;
889 compat_x32_clock_t _utime;
890 compat_x32_clock_t _stime;
891 } _sigchld;
892
893 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
894 struct
895 {
896 unsigned int _addr;
897 } _sigfault;
898
899 /* SIGPOLL */
900 struct
901 {
902 int _band;
903 int _fd;
904 } _sigpoll;
905 } _sifields;
906 } compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
907
908 #define cpt_si_pid _sifields._kill._pid
909 #define cpt_si_uid _sifields._kill._uid
910 #define cpt_si_timerid _sifields._timer._tid
911 #define cpt_si_overrun _sifields._timer._overrun
912 #define cpt_si_status _sifields._sigchld._status
913 #define cpt_si_utime _sifields._sigchld._utime
914 #define cpt_si_stime _sifields._sigchld._stime
915 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
916 #define cpt_si_addr _sifields._sigfault._addr
917 #define cpt_si_band _sifields._sigpoll._band
918 #define cpt_si_fd _sifields._sigpoll._fd
919
920 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
921 In their place are si_timer1 and si_timer2. */
922 #ifndef si_timerid
923 #define si_timerid si_timer1
924 #endif
925 #ifndef si_overrun
926 #define si_overrun si_timer2
927 #endif
928
929 static void
930 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
931 {
932 memset (to, 0, sizeof (*to));
933
934 to->si_signo = from->si_signo;
935 to->si_errno = from->si_errno;
936 to->si_code = from->si_code;
937
938 if (to->si_code == SI_TIMER)
939 {
940 to->cpt_si_timerid = from->si_timerid;
941 to->cpt_si_overrun = from->si_overrun;
942 to->cpt_si_ptr = (intptr_t) from->si_ptr;
943 }
944 else if (to->si_code == SI_USER)
945 {
946 to->cpt_si_pid = from->si_pid;
947 to->cpt_si_uid = from->si_uid;
948 }
949 else if (to->si_code < 0)
950 {
951 to->cpt_si_pid = from->si_pid;
952 to->cpt_si_uid = from->si_uid;
953 to->cpt_si_ptr = (intptr_t) from->si_ptr;
954 }
955 else
956 {
957 switch (to->si_signo)
958 {
959 case SIGCHLD:
960 to->cpt_si_pid = from->si_pid;
961 to->cpt_si_uid = from->si_uid;
962 to->cpt_si_status = from->si_status;
963 to->cpt_si_utime = from->si_utime;
964 to->cpt_si_stime = from->si_stime;
965 break;
966 case SIGILL:
967 case SIGFPE:
968 case SIGSEGV:
969 case SIGBUS:
970 to->cpt_si_addr = (intptr_t) from->si_addr;
971 break;
972 case SIGPOLL:
973 to->cpt_si_band = from->si_band;
974 to->cpt_si_fd = from->si_fd;
975 break;
976 default:
977 to->cpt_si_pid = from->si_pid;
978 to->cpt_si_uid = from->si_uid;
979 to->cpt_si_ptr = (intptr_t) from->si_ptr;
980 break;
981 }
982 }
983 }
984
985 static void
986 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
987 {
988 memset (to, 0, sizeof (*to));
989
990 to->si_signo = from->si_signo;
991 to->si_errno = from->si_errno;
992 to->si_code = from->si_code;
993
994 if (to->si_code == SI_TIMER)
995 {
996 to->si_timerid = from->cpt_si_timerid;
997 to->si_overrun = from->cpt_si_overrun;
998 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
999 }
1000 else if (to->si_code == SI_USER)
1001 {
1002 to->si_pid = from->cpt_si_pid;
1003 to->si_uid = from->cpt_si_uid;
1004 }
1005 else if (to->si_code < 0)
1006 {
1007 to->si_pid = from->cpt_si_pid;
1008 to->si_uid = from->cpt_si_uid;
1009 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1010 }
1011 else
1012 {
1013 switch (to->si_signo)
1014 {
1015 case SIGCHLD:
1016 to->si_pid = from->cpt_si_pid;
1017 to->si_uid = from->cpt_si_uid;
1018 to->si_status = from->cpt_si_status;
1019 to->si_utime = from->cpt_si_utime;
1020 to->si_stime = from->cpt_si_stime;
1021 break;
1022 case SIGILL:
1023 case SIGFPE:
1024 case SIGSEGV:
1025 case SIGBUS:
1026 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1027 break;
1028 case SIGPOLL:
1029 to->si_band = from->cpt_si_band;
1030 to->si_fd = from->cpt_si_fd;
1031 break;
1032 default:
1033 to->si_pid = from->cpt_si_pid;
1034 to->si_uid = from->cpt_si_uid;
1035 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1036 break;
1037 }
1038 }
1039 }
1040
1041 static void
1042 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1043 siginfo_t *from)
1044 {
1045 memset (to, 0, sizeof (*to));
1046
1047 to->si_signo = from->si_signo;
1048 to->si_errno = from->si_errno;
1049 to->si_code = from->si_code;
1050
1051 if (to->si_code == SI_TIMER)
1052 {
1053 to->cpt_si_timerid = from->si_timerid;
1054 to->cpt_si_overrun = from->si_overrun;
1055 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1056 }
1057 else if (to->si_code == SI_USER)
1058 {
1059 to->cpt_si_pid = from->si_pid;
1060 to->cpt_si_uid = from->si_uid;
1061 }
1062 else if (to->si_code < 0)
1063 {
1064 to->cpt_si_pid = from->si_pid;
1065 to->cpt_si_uid = from->si_uid;
1066 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1067 }
1068 else
1069 {
1070 switch (to->si_signo)
1071 {
1072 case SIGCHLD:
1073 to->cpt_si_pid = from->si_pid;
1074 to->cpt_si_uid = from->si_uid;
1075 to->cpt_si_status = from->si_status;
1076 to->cpt_si_utime = from->si_utime;
1077 to->cpt_si_stime = from->si_stime;
1078 break;
1079 case SIGILL:
1080 case SIGFPE:
1081 case SIGSEGV:
1082 case SIGBUS:
1083 to->cpt_si_addr = (intptr_t) from->si_addr;
1084 break;
1085 case SIGPOLL:
1086 to->cpt_si_band = from->si_band;
1087 to->cpt_si_fd = from->si_fd;
1088 break;
1089 default:
1090 to->cpt_si_pid = from->si_pid;
1091 to->cpt_si_uid = from->si_uid;
1092 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1093 break;
1094 }
1095 }
1096 }
1097
1098 static void
1099 siginfo_from_compat_x32_siginfo (siginfo_t *to,
1100 compat_x32_siginfo_t *from)
1101 {
1102 memset (to, 0, sizeof (*to));
1103
1104 to->si_signo = from->si_signo;
1105 to->si_errno = from->si_errno;
1106 to->si_code = from->si_code;
1107
1108 if (to->si_code == SI_TIMER)
1109 {
1110 to->si_timerid = from->cpt_si_timerid;
1111 to->si_overrun = from->cpt_si_overrun;
1112 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1113 }
1114 else if (to->si_code == SI_USER)
1115 {
1116 to->si_pid = from->cpt_si_pid;
1117 to->si_uid = from->cpt_si_uid;
1118 }
1119 else if (to->si_code < 0)
1120 {
1121 to->si_pid = from->cpt_si_pid;
1122 to->si_uid = from->cpt_si_uid;
1123 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1124 }
1125 else
1126 {
1127 switch (to->si_signo)
1128 {
1129 case SIGCHLD:
1130 to->si_pid = from->cpt_si_pid;
1131 to->si_uid = from->cpt_si_uid;
1132 to->si_status = from->cpt_si_status;
1133 to->si_utime = from->cpt_si_utime;
1134 to->si_stime = from->cpt_si_stime;
1135 break;
1136 case SIGILL:
1137 case SIGFPE:
1138 case SIGSEGV:
1139 case SIGBUS:
1140 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1141 break;
1142 case SIGPOLL:
1143 to->si_band = from->cpt_si_band;
1144 to->si_fd = from->cpt_si_fd;
1145 break;
1146 default:
1147 to->si_pid = from->cpt_si_pid;
1148 to->si_uid = from->cpt_si_uid;
1149 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1150 break;
1151 }
1152 }
1153 }
1154
1155 #endif /* __x86_64__ */
1156
1157 /* Convert a native/host siginfo object into/from the siginfo in the
1158 layout of the inferior's architecture. Returns true if any
1159 conversion was done; false otherwise. If DIRECTION is 1, then copy
1160 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1161 INF. */
1162
1163 static int
1164 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
1165 {
1166 #ifdef __x86_64__
1167 unsigned int machine;
1168 int tid = lwpid_of (get_thread_lwp (current_inferior));
1169 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1170
1171 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1172 if (!is_64bit_tdesc ())
1173 {
1174 if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
1175 fatal ("unexpected difference in siginfo");
1176
1177 if (direction == 0)
1178 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1179 else
1180 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1181
1182 return 1;
1183 }
1184 /* No fixup for native x32 GDB. */
1185 else if (!is_elf64 && sizeof (void *) == 8)
1186 {
1187 if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
1188 fatal ("unexpected difference in siginfo");
1189
1190 if (direction == 0)
1191 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1192 native);
1193 else
1194 siginfo_from_compat_x32_siginfo (native,
1195 (struct compat_x32_siginfo *) inf);
1196
1197 return 1;
1198 }
1199 #endif
1200
1201 return 0;
1202 }
1203 \f
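/* Nonzero if the connected GDB announced support for x86 XML target
   descriptions in its qSupported packet; see
   x86_linux_process_qsupported below.  */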
1204 static int use_xml;
1205
1206 /* Format of XSAVE extended state is:
1207 struct
1208 {
1209 fxsave_bytes[0..463]
1210 sw_usable_bytes[464..511]
1211 xstate_hdr_bytes[512..575]
1212 avx_bytes[576..831]
1213 future_state etc
1214 };
1215
1216 Same memory layout will be used for the coredump NT_X86_XSTATE
1217 representing the XSAVE extended state registers.
1218
1219 The first 8 bytes of sw_usable_bytes[464..471] are the OS-enabled
1220 extended state mask, which is the same as the extended control register
1221 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1222 together with the mask saved in the xstate_hdr_bytes to determine what
1223 states the processor/OS supports and what state, used or initialized,
1224 the process/thread is in. */
1225 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
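/* For reference, the XCR0 feature bits relevant here (matching the
   I386_XSTATE_* masks in i386-xstate.h): bit 0 = x87, bit 1 = SSE,
   bit 2 = AVX, and bits 3-4 = the MPX bound-register and bound
   config/status state.  */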
1226
1227 /* Does the current host support the GETFPXREGS request? The header
1228 file may or may not define it, and even if it is defined, the
1229 kernel will return EIO if it's running on a pre-SSE processor. */
1230 int have_ptrace_getfpxregs =
1231 #ifdef HAVE_PTRACE_GETFPXREGS
1232 -1
1233 #else
1234 0
1235 #endif
1236 ;
1237
1238 /* Does the current host support PTRACE_GETREGSET? */
1239 static int have_ptrace_getregset = -1;
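/* Both flags above are tri-state: -1 means not yet determined, 0
   means unsupported, and 1 means supported.  They are resolved on
   first use in x86_linux_read_description below.  */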
1240
1241 /* Get Linux/x86 target description from running target. */
1242
1243 static const struct target_desc *
1244 x86_linux_read_description (void)
1245 {
1246 unsigned int machine;
1247 int is_elf64;
1248 int xcr0_features;
1249 int tid;
1250 static uint64_t xcr0;
1251 struct regset_info *regset;
1252
1253 tid = lwpid_of (get_thread_lwp (current_inferior));
1254
1255 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1256
1257 if (sizeof (void *) == 4)
1258 {
1259 if (is_elf64 > 0)
1260 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1261 #ifndef __x86_64__
1262 else if (machine == EM_X86_64)
1263 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1264 #endif
1265 }
1266
1267 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1268 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1269 {
1270 elf_fpxregset_t fpxregs;
1271
1272 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
1273 {
1274 have_ptrace_getfpxregs = 0;
1275 have_ptrace_getregset = 0;
1276 return tdesc_i386_mmx_linux;
1277 }
1278 else
1279 have_ptrace_getfpxregs = 1;
1280 }
1281 #endif
1282
1283 if (!use_xml)
1284 {
1285 x86_xcr0 = I386_XSTATE_SSE_MASK;
1286
1287 /* Don't use XML. */
1288 #ifdef __x86_64__
1289 if (machine == EM_X86_64)
1290 return tdesc_amd64_linux_no_xml;
1291 else
1292 #endif
1293 return tdesc_i386_linux_no_xml;
1294 }
1295
1296 if (have_ptrace_getregset == -1)
1297 {
1298 uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1299 struct iovec iov;
1300
1301 iov.iov_base = xstateregs;
1302 iov.iov_len = sizeof (xstateregs);
1303
1304 /* Check if PTRACE_GETREGSET works. */
1305 if (ptrace (PTRACE_GETREGSET, tid,
1306 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1307 have_ptrace_getregset = 0;
1308 else
1309 {
1310 have_ptrace_getregset = 1;
1311
1312 /* Get XCR0 from XSAVE extended state. */
1313 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1314 / sizeof (uint64_t))];
1315
1316 /* Use PTRACE_GETREGSET if it is available. */
1317 for (regset = x86_regsets;
1318 regset->fill_function != NULL; regset++)
1319 if (regset->get_request == PTRACE_GETREGSET)
1320 regset->size = I386_XSTATE_SIZE (xcr0);
1321 else if (regset->type != GENERAL_REGS)
1322 regset->size = 0;
1323 }
1324 }
1325
1326 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1327 xcr0_features = (have_ptrace_getregset
1328 && (xcr0 & I386_XSTATE_ALL_MASK));
1329
1330 if (xcr0_features)
1331 x86_xcr0 = xcr0;
1332
1333 if (machine == EM_X86_64)
1334 {
1335 #ifdef __x86_64__
1336 if (is_elf64)
1337 {
1338 if (xcr0_features)
1339 {
1340 switch (xcr0 & I386_XSTATE_ALL_MASK)
1341 {
1342 case I386_XSTATE_MPX_MASK:
1343 return tdesc_amd64_mpx_linux;
1344
1345 case I386_XSTATE_AVX_MASK:
1346 return tdesc_amd64_avx_linux;
1347
1348 default:
1349 return tdesc_amd64_linux;
1350 }
1351 }
1352 else
1353 return tdesc_amd64_linux;
1354 }
1355 else
1356 {
1357 if (xcr0_features)
1358 {
1359 switch (xcr0 & I386_XSTATE_ALL_MASK)
1360 {
1361 case I386_XSTATE_MPX_MASK: /* No MPX on x32; fall through to AVX. */
1362 case I386_XSTATE_AVX_MASK:
1363 return tdesc_x32_avx_linux;
1364
1365 default:
1366 return tdesc_x32_linux;
1367 }
1368 }
1369 else
1370 return tdesc_x32_linux;
1371 }
1372 #endif
1373 }
1374 else
1375 {
1376 if (xcr0_features)
1377 {
1378 switch (xcr0 & I386_XSTATE_ALL_MASK)
1379 {
1380 case (I386_XSTATE_MPX_MASK):
1381 return tdesc_i386_mpx_linux;
1382
1383 case (I386_XSTATE_AVX_MASK):
1384 return tdesc_i386_avx_linux;
1385
1386 default:
1387 return tdesc_i386_linux;
1388 }
1389 }
1390 else
1391 return tdesc_i386_linux;
1392 }
1393
1394 gdb_assert_not_reached ("failed to return tdesc");
1395 }
1396
1397 /* Callback for find_inferior. Stops iteration when a thread with a
1398 given PID is found. */
1399
1400 static int
1401 same_process_callback (struct inferior_list_entry *entry, void *data)
1402 {
1403 int pid = *(int *) data;
1404
1405 return (ptid_get_pid (entry->id) == pid);
1406 }
1407
1408 /* Callback for for_each_inferior. Calls the arch_setup routine for
1409 each process. */
1410
1411 static void
1412 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1413 {
1414 int pid = ptid_get_pid (entry->id);
1415
1416 /* Look up any thread of this process. */
1417 current_inferior
1418 = (struct thread_info *) find_inferior (&all_threads,
1419 same_process_callback, &pid);
1420
1421 the_low_target.arch_setup ();
1422 }
1423
1424 /* Update the target description of all processes; a new GDB has
1425 connected, and it may or may not support XML target descriptions. */
1426
1427 static void
1428 x86_linux_update_xmltarget (void)
1429 {
1430 struct thread_info *save_inferior = current_inferior;
1431
1432 /* Before changing the register cache's internal layout, flush the
1433 contents of the current valid caches back to the threads, and
1434 release the current regcache objects. */
1435 regcache_release ();
1436
1437 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1438
1439 current_inferior = save_inferior;
1440 }
1441
1442 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1443 PTRACE_GETREGSET. */
1444
1445 static void
1446 x86_linux_process_qsupported (const char *query)
1447 {
1448 /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
1449 with "i386" in its qSupported query, which indicates support for
1450 x86 XML target descriptions. */
1451 use_xml = 0;
1452 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1453 {
1454 char *copy = xstrdup (query + 13);
1455 char *p;
1456
1457 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1458 {
1459 if (strcmp (p, "i386") == 0)
1460 {
1461 use_xml = 1;
1462 break;
1463 }
1464 }
1465
1466 free (copy);
1467 }
1468
1469 x86_linux_update_xmltarget ();
1470 }
1471
1472 /* Common for x86/x86-64. */
1473
1474 static struct regsets_info x86_regsets_info =
1475 {
1476 x86_regsets, /* regsets */
1477 0, /* num_regsets */
1478 NULL, /* disabled_regsets */
1479 };
1480
1481 #ifdef __x86_64__
1482 static struct regs_info amd64_linux_regs_info =
1483 {
1484 NULL, /* regset_bitmap */
1485 NULL, /* usrregs_info */
1486 &x86_regsets_info
1487 };
1488 #endif
1489 static struct usrregs_info i386_linux_usrregs_info =
1490 {
1491 I386_NUM_REGS,
1492 i386_regmap,
1493 };
1494
1495 static struct regs_info i386_linux_regs_info =
1496 {
1497 NULL, /* regset_bitmap */
1498 &i386_linux_usrregs_info,
1499 &x86_regsets_info
1500 };
1501
1502 const struct regs_info *
1503 x86_linux_regs_info (void)
1504 {
1505 #ifdef __x86_64__
1506 if (is_64bit_tdesc ())
1507 return &amd64_linux_regs_info;
1508 else
1509 #endif
1510 return &i386_linux_regs_info;
1511 }
1512
1513 /* Initialize the target description for the architecture of the
1514 inferior. */
1515
1516 static void
1517 x86_arch_setup (void)
1518 {
1519 current_process ()->tdesc = x86_linux_read_description ();
1520 }
1521
1522 static int
1523 x86_supports_tracepoints (void)
1524 {
1525 return 1;
1526 }
1527
1528 static void
1529 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1530 {
1531 write_inferior_memory (*to, buf, len);
1532 *to += len;
1533 }
1534
1535 static int
1536 push_opcode (unsigned char *buf, char *op)
1537 {
1538 unsigned char *buf_org = buf;
1539
1540 while (1)
1541 {
1542 char *endptr;
1543 unsigned long ul = strtoul (op, &endptr, 16);
1544
1545 if (endptr == op)
1546 break;
1547
1548 *buf++ = ul;
1549 op = endptr;
1550 }
1551
1552 return buf - buf_org;
1553 }
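/* For example, push_opcode (buf, "48 89 e6") appends the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") to BUF and returns 3.  */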
1554
1555 #ifdef __x86_64__
1556
1557 /* Build a jump pad that saves registers and calls a collection
1558 function. Writes the jump instruction that jumps to the jump pad
1559 into JJUMP_PAD_INSN. The caller is responsible for writing it in
1560 at the tracepoint address. */
1561
1562 static int
1563 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1564 CORE_ADDR collector,
1565 CORE_ADDR lockaddr,
1566 ULONGEST orig_size,
1567 CORE_ADDR *jump_entry,
1568 CORE_ADDR *trampoline,
1569 ULONGEST *trampoline_size,
1570 unsigned char *jjump_pad_insn,
1571 ULONGEST *jjump_pad_insn_size,
1572 CORE_ADDR *adjusted_insn_addr,
1573 CORE_ADDR *adjusted_insn_addr_end,
1574 char *err)
1575 {
1576 unsigned char buf[40];
1577 int i, offset;
1578 int64_t loffset;
1579
1580 CORE_ADDR buildaddr = *jump_entry;
1581
1582 /* Build the jump pad. */
1583
1584 /* First, do tracepoint data collection. Save registers. */
1585 i = 0;
1586 /* Need to ensure stack pointer saved first. */
1587 buf[i++] = 0x54; /* push %rsp */
1588 buf[i++] = 0x55; /* push %rbp */
1589 buf[i++] = 0x57; /* push %rdi */
1590 buf[i++] = 0x56; /* push %rsi */
1591 buf[i++] = 0x52; /* push %rdx */
1592 buf[i++] = 0x51; /* push %rcx */
1593 buf[i++] = 0x53; /* push %rbx */
1594 buf[i++] = 0x50; /* push %rax */
1595 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1596 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1597 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1598 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1599 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1600 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1601 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1602 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1603 buf[i++] = 0x9c; /* pushfq */
1604 buf[i++] = 0x48; /* movq <addr>,%rdi */
1605 buf[i++] = 0xbf;
1606 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1607 i += sizeof (unsigned long);
1608 buf[i++] = 0x57; /* push %rdi */
1609 append_insns (&buildaddr, i, buf);
1610
1611 /* Stack space for the collecting_t object. */
1612 i = 0;
1613 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1614 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1615 memcpy (buf + i, &tpoint, 8);
1616 i += 8;
1617 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1618 i += push_opcode (&buf[i],
1619 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1620 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1621 append_insns (&buildaddr, i, buf);
1622
1623 /* spin-lock. */
1624 i = 0;
1625 i += push_opcode (&buf[i], "48 be"); /* movq <lockaddr>,%rsi */
1626 memcpy (&buf[i], (void *) &lockaddr, 8);
1627 i += 8;
1628 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1629 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1630 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1631 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1632 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1633 append_insns (&buildaddr, i, buf);
1634
1635 /* Set up the gdb_collect call. */
1636 /* At this point, (stack pointer + 0x18) is the base of our saved
1637 register block. */
1638
1639 i = 0;
1640 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1641 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1642
1643 /* tpoint address may be 64-bit wide. */
1644 i += push_opcode (&buf[i], "48 bf"); /* movq <addr>,%rdi */
1645 memcpy (buf + i, &tpoint, 8);
1646 i += 8;
1647 append_insns (&buildaddr, i, buf);
1648
1649 /* The collector function, being in the shared library, may be
1650 more than 31 bits away from the jump pad. */
1651 i = 0;
1652 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1653 memcpy (buf + i, &collector, 8);
1654 i += 8;
1655 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1656 append_insns (&buildaddr, i, buf);
1657
1658 /* Clear the spin-lock. */
1659 i = 0;
1660 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1661 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1662 memcpy (buf + i, &lockaddr, 8);
1663 i += 8;
1664 append_insns (&buildaddr, i, buf);
1665
1666 /* Remove stack that had been used for the collect_t object. */
1667 i = 0;
1668 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1669 append_insns (&buildaddr, i, buf);
1670
1671 /* Restore register state. */
1672 i = 0;
1673 buf[i++] = 0x48; /* add $0x8,%rsp */
1674 buf[i++] = 0x83;
1675 buf[i++] = 0xc4;
1676 buf[i++] = 0x08;
1677 buf[i++] = 0x9d; /* popfq */
1678 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1679 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1680 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1681 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1682 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1683 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1684 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1685 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1686 buf[i++] = 0x58; /* pop %rax */
1687 buf[i++] = 0x5b; /* pop %rbx */
1688 buf[i++] = 0x59; /* pop %rcx */
1689 buf[i++] = 0x5a; /* pop %rdx */
1690 buf[i++] = 0x5e; /* pop %rsi */
1691 buf[i++] = 0x5f; /* pop %rdi */
1692 buf[i++] = 0x5d; /* pop %rbp */
1693 buf[i++] = 0x5c; /* pop %rsp */
1694 append_insns (&buildaddr, i, buf);
1695
1696 /* Now, adjust the original instruction to execute in the jump
1697 pad. */
1698 *adjusted_insn_addr = buildaddr;
1699 relocate_instruction (&buildaddr, tpaddr);
1700 *adjusted_insn_addr_end = buildaddr;
1701
1702 /* Finally, write a jump back to the program. */
1703
1704 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1705 if (loffset > INT_MAX || loffset < INT_MIN)
1706 {
1707 sprintf (err,
1708 "E.Jump back from jump pad too far from tracepoint "
1709 "(offset 0x%" PRIx64 " > int32).", loffset);
1710 return 1;
1711 }
1712
1713 offset = (int) loffset;
1714 memcpy (buf, jump_insn, sizeof (jump_insn));
1715 memcpy (buf + 1, &offset, 4);
1716 append_insns (&buildaddr, sizeof (jump_insn), buf);
1717
1718 /* The jump pad is now built. Wire in a jump to our jump pad. This
1719 is always done last (by our caller actually), so that we can
1720 install fast tracepoints with threads running. This relies on
1721 the agent's atomic write support. */
1722 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1723 if (loffset > INT_MAX || loffset < INT_MIN)
1724 {
1725 sprintf (err,
1726 "E.Jump pad too far from tracepoint "
1727 "(offset 0x%" PRIx64 " > int32).", loffset);
1728 return 1;
1729 }
1730
1731 offset = (int) loffset;
1732
1733 memcpy (buf, jump_insn, sizeof (jump_insn));
1734 memcpy (buf + 1, &offset, 4);
1735 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1736 *jjump_pad_insn_size = sizeof (jump_insn);
1737
1738 /* Return the end address of our pad. */
1739 *jump_entry = buildaddr;
1740
1741 return 0;
1742 }
1743
1744 #endif /* __x86_64__ */
1745
1746 /* Build a jump pad that saves registers and calls a collection
1747 function. Writes the jump instruction that jumps to the jump pad
1748 into JJUMP_PAD_INSN. The caller is responsible for writing it in
1749 at the tracepoint address. */
1750
1751 static int
1752 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1753 CORE_ADDR collector,
1754 CORE_ADDR lockaddr,
1755 ULONGEST orig_size,
1756 CORE_ADDR *jump_entry,
1757 CORE_ADDR *trampoline,
1758 ULONGEST *trampoline_size,
1759 unsigned char *jjump_pad_insn,
1760 ULONGEST *jjump_pad_insn_size,
1761 CORE_ADDR *adjusted_insn_addr,
1762 CORE_ADDR *adjusted_insn_addr_end,
1763 char *err)
1764 {
1765 unsigned char buf[0x100];
1766 int i, offset;
1767 CORE_ADDR buildaddr = *jump_entry;
1768
1769 /* Build the jump pad. */
1770
1771 /* First, do tracepoint data collection. Save registers. */
1772 i = 0;
1773 buf[i++] = 0x60; /* pushad */
1774 buf[i++] = 0x68; /* push tpaddr aka $pc */
1775 *((int *)(buf + i)) = (int) tpaddr;
1776 i += 4;
1777 buf[i++] = 0x9c; /* pushf */
1778 buf[i++] = 0x1e; /* push %ds */
1779 buf[i++] = 0x06; /* push %es */
1780 buf[i++] = 0x0f; /* push %fs */
1781 buf[i++] = 0xa0;
1782 buf[i++] = 0x0f; /* push %gs */
1783 buf[i++] = 0xa8;
1784 buf[i++] = 0x16; /* push %ss */
1785 buf[i++] = 0x0e; /* push %cs */
1786 append_insns (&buildaddr, i, buf);
1787
1788 /* Stack space for the collecting_t object. */
1789 i = 0;
1790 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1791
1792 /* Build the object. */
1793 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1794 memcpy (buf + i, &tpoint, 4);
1795 i += 4;
1796 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1797
1798 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1799 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1800 append_insns (&buildaddr, i, buf);
1801
1802 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1803 original i386; if we cared about that, this could use xchg instead. */
1804
1805 i = 0;
1806 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1807 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1808 %esp,<lockaddr> */
1809 memcpy (&buf[i], (void *) &lockaddr, 4);
1810 i += 4;
1811 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1812 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1813 append_insns (&buildaddr, i, buf);
1814
1815
1816 /* Set up arguments to the gdb_collect call. */
1817 i = 0;
1818 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1819 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1820 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1821 append_insns (&buildaddr, i, buf);
1822
1823 i = 0;
1824 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1825 append_insns (&buildaddr, i, buf);
1826
1827 i = 0;
1828 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1829 memcpy (&buf[i], (void *) &tpoint, 4);
1830 i += 4;
1831 append_insns (&buildaddr, i, buf);
1832
1833 buf[0] = 0xe8; /* call <reladdr> */
1834 offset = collector - (buildaddr + sizeof (jump_insn));
1835 memcpy (buf + 1, &offset, 4);
1836 append_insns (&buildaddr, 5, buf);
1837 /* Clean up after the call. */
1838 buf[0] = 0x83; /* add $0x8,%esp */
1839 buf[1] = 0xc4;
1840 buf[2] = 0x08;
1841 append_insns (&buildaddr, 3, buf);
1842
1843
1844 /* Clear the spin-lock. This would need the LOCK prefix on older
1845 broken archs. */
1846 i = 0;
1847 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1848 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1849 memcpy (buf + i, &lockaddr, 4);
1850 i += 4;
1851 append_insns (&buildaddr, i, buf);
1852
1853
1854 /* Remove stack that had been used for the collect_t object. */
1855 i = 0;
1856 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1857 append_insns (&buildaddr, i, buf);
1858
1859 i = 0;
1860 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1861 buf[i++] = 0xc4;
1862 buf[i++] = 0x04;
1863 buf[i++] = 0x17; /* pop %ss */
1864 buf[i++] = 0x0f; /* pop %gs */
1865 buf[i++] = 0xa9;
1866 buf[i++] = 0x0f; /* pop %fs */
1867 buf[i++] = 0xa1;
1868 buf[i++] = 0x07; /* pop %es */
1869 buf[i++] = 0x1f; /* pop %ds */
1870 buf[i++] = 0x9d; /* popf */
1871 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1872 buf[i++] = 0xc4;
1873 buf[i++] = 0x04;
1874 buf[i++] = 0x61; /* popad */
1875 append_insns (&buildaddr, i, buf);
1876
1877 /* Now, adjust the original instruction to execute in the jump
1878 pad. */
1879 *adjusted_insn_addr = buildaddr;
1880 relocate_instruction (&buildaddr, tpaddr);
1881 *adjusted_insn_addr_end = buildaddr;
1882
1883 /* Write the jump back to the program. */
1884 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1885 memcpy (buf, jump_insn, sizeof (jump_insn));
1886 memcpy (buf + 1, &offset, 4);
1887 append_insns (&buildaddr, sizeof (jump_insn), buf);
1888
1889 /* The jump pad is now built. Wire in a jump to our jump pad. This
1890 is always done last (by our caller actually), so that we can
1891 install fast tracepoints with threads running. This relies on
1892 the agent's atomic write support. */
1893 if (orig_size == 4)
1894 {
1895 /* Create a trampoline. */
1896 *trampoline_size = sizeof (jump_insn);
1897 if (!claim_trampoline_space (*trampoline_size, trampoline))
1898 {
1899 /* No trampoline space available. */
1900 strcpy (err,
1901 "E.Cannot allocate trampoline space needed for fast "
1902 "tracepoints on 4-byte instructions.");
1903 return 1;
1904 }
1905
1906 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1907 memcpy (buf, jump_insn, sizeof (jump_insn));
1908 memcpy (buf + 1, &offset, 4);
1909 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1910
1911 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1912 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1913 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1914 memcpy (buf + 2, &offset, 2);
1915 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1916 *jjump_pad_insn_size = sizeof (small_jump_insn);
1917 }
1918 else
1919 {
1920 /* Else use a 32-bit relative jump instruction. */
1921 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1922 memcpy (buf, jump_insn, sizeof (jump_insn));
1923 memcpy (buf + 1, &offset, 4);
1924 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1925 *jjump_pad_insn_size = sizeof (jump_insn);
1926 }
1927
1928 /* Return the end address of our pad. */
1929 *jump_entry = buildaddr;
1930
1931 return 0;
1932 }
1933
1934 static int
1935 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1936 CORE_ADDR collector,
1937 CORE_ADDR lockaddr,
1938 ULONGEST orig_size,
1939 CORE_ADDR *jump_entry,
1940 CORE_ADDR *trampoline,
1941 ULONGEST *trampoline_size,
1942 unsigned char *jjump_pad_insn,
1943 ULONGEST *jjump_pad_insn_size,
1944 CORE_ADDR *adjusted_insn_addr,
1945 CORE_ADDR *adjusted_insn_addr_end,
1946 char *err)
1947 {
1948 #ifdef __x86_64__
1949 if (is_64bit_tdesc ())
1950 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1951 collector, lockaddr,
1952 orig_size, jump_entry,
1953 trampoline, trampoline_size,
1954 jjump_pad_insn,
1955 jjump_pad_insn_size,
1956 adjusted_insn_addr,
1957 adjusted_insn_addr_end,
1958 err);
1959 #endif
1960
1961 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1962 collector, lockaddr,
1963 orig_size, jump_entry,
1964 trampoline, trampoline_size,
1965 jjump_pad_insn,
1966 jjump_pad_insn_size,
1967 adjusted_insn_addr,
1968 adjusted_insn_addr_end,
1969 err);
1970 }
1971
1972 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1973 architectures. */
1974
1975 static int
1976 x86_get_min_fast_tracepoint_insn_len (void)
1977 {
1978 static int warned_about_fast_tracepoints = 0;
1979
1980 #ifdef __x86_64__
1981 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1982 used for fast tracepoints. */
1983 if (is_64bit_tdesc ())
1984 return 5;
1985 #endif
1986
1987 if (agent_loaded_p ())
1988 {
1989 char errbuf[IPA_BUFSIZ];
1990
1991 errbuf[0] = '\0';
1992
1993 /* On x86, if trampolines are available, then 4-byte jump instructions
1994 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1995 with a 4-byte offset are used instead. */
1996 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1997 return 4;
1998 else
1999 {
2000 /* GDB has no channel to explain to the user why a shorter fast
2001 tracepoint is not possible, but at least make GDBserver
2002 mention that something has gone awry. */
2003 if (!warned_about_fast_tracepoints)
2004 {
2005 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2006 warned_about_fast_tracepoints = 1;
2007 }
2008 return 5;
2009 }
2010 }
2011 else
2012 {
2013 /* Indicate that the minimum length is currently unknown since the IPA
2014 has not loaded yet. */
2015 return 0;
2016 }
2017 }
2018
2019 static void
2020 add_insns (unsigned char *start, int len)
2021 {
2022 CORE_ADDR buildaddr = current_insn_ptr;
2023
2024 if (debug_threads)
2025 fprintf (stderr, "Adding %d bytes of insn at %s\n",
2026 len, paddress (buildaddr));
2027
2028 append_insns (&buildaddr, len, start);
2029 current_insn_ptr = buildaddr;
2030 }
2031
2032 /* Our general strategy for emitting code is to avoid specifying raw
2033 bytes whenever possible, and instead copy a block of inline asm
2034 that is embedded in the function. This is a little messy, because
2035 we need to keep the compiler from discarding what looks like dead
2036 code, plus suppress various warnings. */
2037
2038 #define EMIT_ASM(NAME, INSNS) \
2039 do \
2040 { \
2041 extern unsigned char start_ ## NAME, end_ ## NAME; \
2042 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2043 __asm__ ("jmp end_" #NAME "\n" \
2044 "\t" "start_" #NAME ":" \
2045 "\t" INSNS "\n" \
2046 "\t" "end_" #NAME ":"); \
2047 } while (0)
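/* For example, EMIT_ASM (my_push, "push %rax") copies the encoded
   "push %rax" into the inferior at current_insn_ptr: the inline asm
   materializes the instruction bytes between the start_/end_ labels
   inside this function's body (the "jmp" skips them at run time), and
   add_insns copies that range into the jump pad.  "my_push" is just
   an illustrative label name.  */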
2048
2049 #ifdef __x86_64__
2050
2051 #define EMIT_ASM32(NAME,INSNS) \
2052 do \
2053 { \
2054 extern unsigned char start_ ## NAME, end_ ## NAME; \
2055 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2056 __asm__ (".code32\n" \
2057 "\t" "jmp end_" #NAME "\n" \
2058 "\t" "start_" #NAME ":\n" \
2059 "\t" INSNS "\n" \
2060 "\t" "end_" #NAME ":\n" \
2061 ".code64\n"); \
2062 } while (0)
2063
2064 #else
2065
2066 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2067
2068 #endif
2069
2070 #ifdef __x86_64__
2071
2072 static void
2073 amd64_emit_prologue (void)
2074 {
2075 EMIT_ASM (amd64_prologue,
2076 "pushq %rbp\n\t"
2077 "movq %rsp,%rbp\n\t"
2078 "sub $0x20,%rsp\n\t"
2079 "movq %rdi,-8(%rbp)\n\t"
2080 "movq %rsi,-16(%rbp)");
2081 }
2082
2083
2084 static void
2085 amd64_emit_epilogue (void)
2086 {
2087 EMIT_ASM (amd64_epilogue,
2088 "movq -16(%rbp),%rdi\n\t"
2089 "movq %rax,(%rdi)\n\t"
2090 "xor %rax,%rax\n\t"
2091 "leave\n\t"
2092 "ret");
2093 }
2094
2095 static void
2096 amd64_emit_add (void)
2097 {
2098 EMIT_ASM (amd64_add,
2099 "add (%rsp),%rax\n\t"
2100 "lea 0x8(%rsp),%rsp");
2101 }
2102
2103 static void
2104 amd64_emit_sub (void)
2105 {
2106 EMIT_ASM (amd64_sub,
2107 "sub %rax,(%rsp)\n\t"
2108 "pop %rax");
2109 }
2110
2111 static void
2112 amd64_emit_mul (void)
2113 {
2114 emit_error = 1;
2115 }
2116
2117 static void
2118 amd64_emit_lsh (void)
2119 {
2120 emit_error = 1;
2121 }
2122
2123 static void
2124 amd64_emit_rsh_signed (void)
2125 {
2126 emit_error = 1;
2127 }
2128
2129 static void
2130 amd64_emit_rsh_unsigned (void)
2131 {
2132 emit_error = 1;
2133 }
2134
2135 static void
2136 amd64_emit_ext (int arg)
2137 {
2138 switch (arg)
2139 {
2140 case 8:
2141 EMIT_ASM (amd64_ext_8,
2142 "cbtw\n\t"
2143 "cwtl\n\t"
2144 "cltq");
2145 break;
2146 case 16:
2147 EMIT_ASM (amd64_ext_16,
2148 "cwtl\n\t"
2149 "cltq");
2150 break;
2151 case 32:
2152 EMIT_ASM (amd64_ext_32,
2153 "cltq");
2154 break;
2155 default:
2156 emit_error = 1;
2157 }
2158 }
2159
2160 static void
2161 amd64_emit_log_not (void)
2162 {
2163 EMIT_ASM (amd64_log_not,
2164 "test %rax,%rax\n\t"
2165 "sete %cl\n\t"
2166 "movzbq %cl,%rax");
2167 }
2168
2169 static void
2170 amd64_emit_bit_and (void)
2171 {
2172 EMIT_ASM (amd64_and,
2173 "and (%rsp),%rax\n\t"
2174 "lea 0x8(%rsp),%rsp");
2175 }
2176
2177 static void
2178 amd64_emit_bit_or (void)
2179 {
2180 EMIT_ASM (amd64_or,
2181 "or (%rsp),%rax\n\t"
2182 "lea 0x8(%rsp),%rsp");
2183 }
2184
2185 static void
2186 amd64_emit_bit_xor (void)
2187 {
2188 EMIT_ASM (amd64_xor,
2189 "xor (%rsp),%rax\n\t"
2190 "lea 0x8(%rsp),%rsp");
2191 }
2192
2193 static void
2194 amd64_emit_bit_not (void)
2195 {
2196 EMIT_ASM (amd64_bit_not,
2197 "xorq $0xffffffffffffffff,%rax");
2198 }
2199
2200 static void
2201 amd64_emit_equal (void)
2202 {
2203 EMIT_ASM (amd64_equal,
2204 "cmp %rax,(%rsp)\n\t"
2205 "je .Lamd64_equal_true\n\t"
2206 "xor %rax,%rax\n\t"
2207 "jmp .Lamd64_equal_end\n\t"
2208 ".Lamd64_equal_true:\n\t"
2209 "mov $0x1,%rax\n\t"
2210 ".Lamd64_equal_end:\n\t"
2211 "lea 0x8(%rsp),%rsp");
2212 }
2213
2214 static void
2215 amd64_emit_less_signed (void)
2216 {
2217 EMIT_ASM (amd64_less_signed,
2218 "cmp %rax,(%rsp)\n\t"
2219 "jl .Lamd64_less_signed_true\n\t"
2220 "xor %rax,%rax\n\t"
2221 "jmp .Lamd64_less_signed_end\n\t"
2222 ".Lamd64_less_signed_true:\n\t"
2223 "mov $1,%rax\n\t"
2224 ".Lamd64_less_signed_end:\n\t"
2225 "lea 0x8(%rsp),%rsp");
2226 }
2227
2228 static void
2229 amd64_emit_less_unsigned (void)
2230 {
2231 EMIT_ASM (amd64_less_unsigned,
2232 "cmp %rax,(%rsp)\n\t"
2233 "jb .Lamd64_less_unsigned_true\n\t"
2234 "xor %rax,%rax\n\t"
2235 "jmp .Lamd64_less_unsigned_end\n\t"
2236 ".Lamd64_less_unsigned_true:\n\t"
2237 "mov $1,%rax\n\t"
2238 ".Lamd64_less_unsigned_end:\n\t"
2239 "lea 0x8(%rsp),%rsp");
2240 }
2241
2242 static void
2243 amd64_emit_ref (int size)
2244 {
2245 switch (size)
2246 {
2247 case 1:
2248 EMIT_ASM (amd64_ref1,
2249 "movb (%rax),%al");
2250 break;
2251 case 2:
2252 EMIT_ASM (amd64_ref2,
2253 "movw (%rax),%ax");
2254 break;
2255 case 4:
2256 EMIT_ASM (amd64_ref4,
2257 "movl (%rax),%eax");
2258 break;
2259 case 8:
2260 EMIT_ASM (amd64_ref8,
2261 "movq (%rax),%rax");
2262 break;
2263 }
2264 }
2265
2266 static void
2267 amd64_emit_if_goto (int *offset_p, int *size_p)
2268 {
2269 EMIT_ASM (amd64_if_goto,
2270 "mov %rax,%rcx\n\t"
2271 "pop %rax\n\t"
2272 "cmp $0,%rcx\n\t"
2273 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2274 if (offset_p)
2275 *offset_p = 10;
2276 if (size_p)
2277 *size_p = 4;
2278 }
2279
2280 static void
2281 amd64_emit_goto (int *offset_p, int *size_p)
2282 {
2283 EMIT_ASM (amd64_goto,
2284 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2285 if (offset_p)
2286 *offset_p = 1;
2287 if (size_p)
2288 *size_p = 4;
2289 }
2290
2291 static void
2292 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2293 {
2294 int diff = (to - (from + size));
2295 unsigned char buf[sizeof (int)];
2296
2297 if (size != 4)
2298 {
2299 emit_error = 1;
2300 return;
2301 }
2302
2303 memcpy (buf, &diff, sizeof (int));
2304 write_inferior_memory (from, buf, sizeof (int));
2305 }
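/* Worked example: a goto whose displacement field starts at FROM =
   0x1000 and should land on TO = 0x1020 gets diff = 0x1020 - (0x1000
   + 4) = 0x1c, because the processor adds the displacement to the
   address of the byte following it.  */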
2306
2307 static void
2308 amd64_emit_const (LONGEST num)
2309 {
2310 unsigned char buf[16];
2311 int i;
2312 CORE_ADDR buildaddr = current_insn_ptr;
2313
2314 i = 0;
2315 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2316 memcpy (&buf[i], &num, sizeof (num));
2317 i += 8;
2318 append_insns (&buildaddr, i, buf);
2319 current_insn_ptr = buildaddr;
2320 }
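/* E.g. amd64_emit_const (0x1122334455667788) appends the ten bytes
   48 b8 88 77 66 55 44 33 22 11: a movabs of the little-endian
   immediate into %rax.  */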
2321
2322 static void
2323 amd64_emit_call (CORE_ADDR fn)
2324 {
2325 unsigned char buf[16];
2326 int i;
2327 CORE_ADDR buildaddr;
2328 LONGEST offset64;
2329
2330 /* The destination function lives in a shared library, so it may be
2331 more than 31 bits away from the compiled code pad. */
2332
2333 buildaddr = current_insn_ptr;
2334
2335 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2336
2337 i = 0;
2338
2339 if (offset64 > INT_MAX || offset64 < INT_MIN)
2340 {
2341 /* Offset is too large for a direct call. Load the address into a
2342 register and call through it instead. %rdx is safe here: it is
2343 call-clobbered and carries no argument in any of these calls. */
2344 buf[i++] = 0x48; /* mov $fn,%rdx */
2345 buf[i++] = 0xba;
2346 memcpy (buf + i, &fn, 8);
2347 i += 8;
2348 buf[i++] = 0xff; /* callq *%rdx */
2349 buf[i++] = 0xd2;
2350 }
2351 else
2352 {
2353 int offset32 = offset64; /* we know we can't overflow here. */
     buf[i++] = 0xe8; /* call <reladdr> */
2354 memcpy (buf + i, &offset32, 4);
2355 i += 4;
2356 }
2357
2358 append_insns (&buildaddr, i, buf);
2359 current_insn_ptr = buildaddr;
2360 }
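/* Worked example for the near case: with buildaddr = 0x1000 and
   fn = 0x2000, offset64 = 0x2000 - 0x1005 = 0xffb, so the emitted
   bytes are e8 fb 0f 00 00.  */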
2361
2362 static void
2363 amd64_emit_reg (int reg)
2364 {
2365 unsigned char buf[16];
2366 int i;
2367 CORE_ADDR buildaddr;
2368
2369 /* Assume raw_regs is still in %rdi. */
2370 buildaddr = current_insn_ptr;
2371 i = 0;
2372 buf[i++] = 0xbe; /* mov $<n>,%esi */
2373 memcpy (&buf[i], &reg, sizeof (reg));
2374 i += 4;
2375 append_insns (&buildaddr, i, buf);
2376 current_insn_ptr = buildaddr;
2377 amd64_emit_call (get_raw_reg_func_addr ());
2378 }
2379
2380 static void
2381 amd64_emit_pop (void)
2382 {
2383 EMIT_ASM (amd64_pop,
2384 "pop %rax");
2385 }
2386
2387 static void
2388 amd64_emit_stack_flush (void)
2389 {
2390 EMIT_ASM (amd64_stack_flush,
2391 "push %rax");
2392 }
2393
2394 static void
2395 amd64_emit_zero_ext (int arg)
2396 {
2397 switch (arg)
2398 {
2399 case 8:
2400 EMIT_ASM (amd64_zero_ext_8,
2401 "and $0xff,%rax");
2402 break;
2403 case 16:
2404 EMIT_ASM (amd64_zero_ext_16,
2405 "and $0xffff,%rax");
2406 break;
2407 case 32:
2408 EMIT_ASM (amd64_zero_ext_32,
2409 "mov $0xffffffff,%rcx\n\t"
2410 "and %rcx,%rax");
2411 break;
2412 default:
2413 emit_error = 1;
2414 }
2415 }
2416
2417 static void
2418 amd64_emit_swap (void)
2419 {
2420 EMIT_ASM (amd64_swap,
2421 "mov %rax,%rcx\n\t"
2422 "pop %rax\n\t"
2423 "push %rcx");
2424 }
2425
2426 static void
2427 amd64_emit_stack_adjust (int n)
2428 {
2429 unsigned char buf[16];
2430 int i;
2431 CORE_ADDR buildaddr = current_insn_ptr;
2432
2433 i = 0;
2434 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2435 buf[i++] = 0x8d;
2436 buf[i++] = 0x64;
2437 buf[i++] = 0x24;
2438 /* The 8-bit displacement only handles adjustments up to 15 slots, but we don't expect any more. */
2439 buf[i++] = n * 8;
2440 append_insns (&buildaddr, i, buf);
2441 current_insn_ptr = buildaddr;
2442 }
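/* E.g. amd64_emit_stack_adjust (2) discards two 8-byte stack slots by
   emitting 48 8d 64 24 10, i.e. lea 0x10(%rsp),%rsp.  */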
2443
2444 /* FN's prototype is `LONGEST(*fn)(int)'. */
2445
2446 static void
2447 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2448 {
2449 unsigned char buf[16];
2450 int i;
2451 CORE_ADDR buildaddr;
2452
2453 buildaddr = current_insn_ptr;
2454 i = 0;
2455 buf[i++] = 0xbf; /* movl $<n>,%edi */
2456 memcpy (&buf[i], &arg1, sizeof (arg1));
2457 i += 4;
2458 append_insns (&buildaddr, i, buf);
2459 current_insn_ptr = buildaddr;
2460 amd64_emit_call (fn);
2461 }
2462
2463 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2464
2465 static void
2466 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2467 {
2468 unsigned char buf[16];
2469 int i;
2470 CORE_ADDR buildaddr;
2471
2472 buildaddr = current_insn_ptr;
2473 i = 0;
2474 buf[i++] = 0xbf; /* movl $<n>,%edi */
2475 memcpy (&buf[i], &arg1, sizeof (arg1));
2476 i += 4;
2477 append_insns (&buildaddr, i, buf);
2478 current_insn_ptr = buildaddr;
2479 EMIT_ASM (amd64_void_call_2_a,
2480 /* Save away a copy of the stack top. */
2481 "push %rax\n\t"
2482 /* Also pass top as the second argument. */
2483 "mov %rax,%rsi");
2484 amd64_emit_call (fn);
2485 EMIT_ASM (amd64_void_call_2_b,
2486 /* Restore the stack top, %rax may have been trashed. */
2487 "pop %rax");
2488 }
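/* Note the reliance on the SysV AMD64 calling convention here: the
   movl above put ARG1 in %edi, the first integer argument register,
   and the asm block passes the top of the bytecode stack in %rsi,
   the second.  */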
2489
2490 void
2491 amd64_emit_eq_goto (int *offset_p, int *size_p)
2492 {
2493 EMIT_ASM (amd64_eq,
2494 "cmp %rax,(%rsp)\n\t"
2495 "jne .Lamd64_eq_fallthru\n\t"
2496 "lea 0x8(%rsp),%rsp\n\t"
2497 "pop %rax\n\t"
2498 /* jmp, but don't trust the assembler to choose the right jump */
2499 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2500 ".Lamd64_eq_fallthru:\n\t"
2501 "lea 0x8(%rsp),%rsp\n\t"
2502 "pop %rax");
2503
2504 if (offset_p)
2505 *offset_p = 13;
2506 if (size_p)
2507 *size_p = 4;
2508 }
2509
2510 void
2511 amd64_emit_ne_goto (int *offset_p, int *size_p)
2512 {
2513 EMIT_ASM (amd64_ne,
2514 "cmp %rax,(%rsp)\n\t"
2515 "je .Lamd64_ne_fallthru\n\t"
2516 "lea 0x8(%rsp),%rsp\n\t"
2517 "pop %rax\n\t"
2518 /* jmp, but don't trust the assembler to choose the right jump */
2519 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2520 ".Lamd64_ne_fallthru:\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2522 "pop %rax");
2523
2524 if (offset_p)
2525 *offset_p = 13;
2526 if (size_p)
2527 *size_p = 4;
2528 }
2529
2530 void
2531 amd64_emit_lt_goto (int *offset_p, int *size_p)
2532 {
2533 EMIT_ASM (amd64_lt,
2534 "cmp %rax,(%rsp)\n\t"
2535 "jnl .Lamd64_lt_fallthru\n\t"
2536 "lea 0x8(%rsp),%rsp\n\t"
2537 "pop %rax\n\t"
2538 /* jmp, but don't trust the assembler to choose the right jump */
2539 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2540 ".Lamd64_lt_fallthru:\n\t"
2541 "lea 0x8(%rsp),%rsp\n\t"
2542 "pop %rax");
2543
2544 if (offset_p)
2545 *offset_p = 13;
2546 if (size_p)
2547 *size_p = 4;
2548 }
2549
2550 void
2551 amd64_emit_le_goto (int *offset_p, int *size_p)
2552 {
2553 EMIT_ASM (amd64_le,
2554 "cmp %rax,(%rsp)\n\t"
2555 "jnle .Lamd64_le_fallthru\n\t"
2556 "lea 0x8(%rsp),%rsp\n\t"
2557 "pop %rax\n\t"
2558 /* jmp, but don't trust the assembler to choose the right jump */
2559 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2560 ".Lamd64_le_fallthru:\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2562 "pop %rax");
2563
2564 if (offset_p)
2565 *offset_p = 13;
2566 if (size_p)
2567 *size_p = 4;
2568 }
2569
2570 void
2571 amd64_emit_gt_goto (int *offset_p, int *size_p)
2572 {
2573 EMIT_ASM (amd64_gt,
2574 "cmp %rax,(%rsp)\n\t"
2575 "jng .Lamd64_gt_fallthru\n\t"
2576 "lea 0x8(%rsp),%rsp\n\t"
2577 "pop %rax\n\t"
2578 /* jmp, but don't trust the assembler to choose the right jump */
2579 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2580 ".Lamd64_gt_fallthru:\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2582 "pop %rax");
2583
2584 if (offset_p)
2585 *offset_p = 13;
2586 if (size_p)
2587 *size_p = 4;
2588 }
2589
2590 void
2591 amd64_emit_ge_goto (int *offset_p, int *size_p)
2592 {
2593 EMIT_ASM (amd64_ge,
2594 "cmp %rax,(%rsp)\n\t"
2595 "jnge .Lamd64_ge_fallthru\n\t"
2596 ".Lamd64_ge_jump:\n\t"
2597 "lea 0x8(%rsp),%rsp\n\t"
2598 "pop %rax\n\t"
2599 /* jmp, but don't trust the assembler to choose the right jump */
2600 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2601 ".Lamd64_ge_fallthru:\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2603 "pop %rax");
2604
2605 if (offset_p)
2606 *offset_p = 13;
2607 if (size_p)
2608 *size_p = 4;
2609 }
2610
2611 struct emit_ops amd64_emit_ops =
2612 {
2613 amd64_emit_prologue,
2614 amd64_emit_epilogue,
2615 amd64_emit_add,
2616 amd64_emit_sub,
2617 amd64_emit_mul,
2618 amd64_emit_lsh,
2619 amd64_emit_rsh_signed,
2620 amd64_emit_rsh_unsigned,
2621 amd64_emit_ext,
2622 amd64_emit_log_not,
2623 amd64_emit_bit_and,
2624 amd64_emit_bit_or,
2625 amd64_emit_bit_xor,
2626 amd64_emit_bit_not,
2627 amd64_emit_equal,
2628 amd64_emit_less_signed,
2629 amd64_emit_less_unsigned,
2630 amd64_emit_ref,
2631 amd64_emit_if_goto,
2632 amd64_emit_goto,
2633 amd64_write_goto_address,
2634 amd64_emit_const,
2635 amd64_emit_call,
2636 amd64_emit_reg,
2637 amd64_emit_pop,
2638 amd64_emit_stack_flush,
2639 amd64_emit_zero_ext,
2640 amd64_emit_swap,
2641 amd64_emit_stack_adjust,
2642 amd64_emit_int_call_1,
2643 amd64_emit_void_call_2,
2644 amd64_emit_eq_goto,
2645 amd64_emit_ne_goto,
2646 amd64_emit_lt_goto,
2647 amd64_emit_le_goto,
2648 amd64_emit_gt_goto,
2649 amd64_emit_ge_goto
2650 };
2651
2652 #endif /* __x86_64__ */
2653
2654 static void
2655 i386_emit_prologue (void)
2656 {
2657 EMIT_ASM32 (i386_prologue,
2658 "push %ebp\n\t"
2659 "mov %esp,%ebp\n\t"
2660 "push %ebx");
2661 /* At this point, the raw regs base address is at 8(%ebp), and the
2662 value pointer is at 12(%ebp). */
2663 }
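/* In all of the i386 emitters below, the 64-bit top of the bytecode
   stack is cached in registers, low half in %eax and high half in
   %ebx, and deeper entries are spilled to the hardware stack two
   4-byte slots at a time; that is why the operations work on the
   %eax/%ebx pair and push or pop in pairs.  */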
2664
2665 static void
2666 i386_emit_epilogue (void)
2667 {
2668 EMIT_ASM32 (i386_epilogue,
2669 "mov 12(%ebp),%ecx\n\t"
2670 "mov %eax,(%ecx)\n\t"
2671 "mov %ebx,0x4(%ecx)\n\t"
2672 "xor %eax,%eax\n\t"
2673 "pop %ebx\n\t"
2674 "pop %ebp\n\t"
2675 "ret");
2676 }
2677
2678 static void
2679 i386_emit_add (void)
2680 {
2681 EMIT_ASM32 (i386_add,
2682 "add (%esp),%eax\n\t"
2683 "adc 0x4(%esp),%ebx\n\t"
2684 "lea 0x8(%esp),%esp");
2685 }
2686
2687 static void
2688 i386_emit_sub (void)
2689 {
2690 EMIT_ASM32 (i386_sub,
2691 "subl %eax,(%esp)\n\t"
2692 "sbbl %ebx,4(%esp)\n\t"
2693 "pop %eax\n\t"
2694 "pop %ebx\n\t");
2695 }
2696
2697 static void
2698 i386_emit_mul (void)
2699 {
2700 emit_error = 1;
2701 }
2702
2703 static void
2704 i386_emit_lsh (void)
2705 {
2706 emit_error = 1;
2707 }
2708
2709 static void
2710 i386_emit_rsh_signed (void)
2711 {
2712 emit_error = 1;
2713 }
2714
2715 static void
2716 i386_emit_rsh_unsigned (void)
2717 {
2718 emit_error = 1;
2719 }
2720
2721 static void
2722 i386_emit_ext (int arg)
2723 {
2724 switch (arg)
2725 {
2726 case 8:
2727 EMIT_ASM32 (i386_ext_8,
2728 "cbtw\n\t"
2729 "cwtl\n\t"
2730 "movl %eax,%ebx\n\t"
2731 "sarl $31,%ebx");
2732 break;
2733 case 16:
2734 EMIT_ASM32 (i386_ext_16,
2735 "cwtl\n\t"
2736 "movl %eax,%ebx\n\t"
2737 "sarl $31,%ebx");
2738 break;
2739 case 32:
2740 EMIT_ASM32 (i386_ext_32,
2741 "movl %eax,%ebx\n\t"
2742 "sarl $31,%ebx");
2743 break;
2744 default:
2745 emit_error = 1;
2746 }
2747 }
2748
2749 static void
2750 i386_emit_log_not (void)
2751 {
2752 EMIT_ASM32 (i386_log_not,
2753 "or %ebx,%eax\n\t"
2754 "test %eax,%eax\n\t"
2755 "sete %cl\n\t"
2756 "xor %ebx,%ebx\n\t"
2757 "movzbl %cl,%eax");
2758 }
2759
2760 static void
2761 i386_emit_bit_and (void)
2762 {
2763 EMIT_ASM32 (i386_and,
2764 "and (%esp),%eax\n\t"
2765 "and 0x4(%esp),%ebx\n\t"
2766 "lea 0x8(%esp),%esp");
2767 }
2768
2769 static void
2770 i386_emit_bit_or (void)
2771 {
2772 EMIT_ASM32 (i386_or,
2773 "or (%esp),%eax\n\t"
2774 "or 0x4(%esp),%ebx\n\t"
2775 "lea 0x8(%esp),%esp");
2776 }
2777
2778 static void
2779 i386_emit_bit_xor (void)
2780 {
2781 EMIT_ASM32 (i386_xor,
2782 "xor (%esp),%eax\n\t"
2783 "xor 0x4(%esp),%ebx\n\t"
2784 "lea 0x8(%esp),%esp");
2785 }
2786
2787 static void
2788 i386_emit_bit_not (void)
2789 {
2790 EMIT_ASM32 (i386_bit_not,
2791 "xor $0xffffffff,%eax\n\t"
2792 "xor $0xffffffff,%ebx\n\t");
2793 }
2794
2795 static void
2796 i386_emit_equal (void)
2797 {
2798 EMIT_ASM32 (i386_equal,
2799 "cmpl %ebx,4(%esp)\n\t"
2800 "jne .Li386_equal_false\n\t"
2801 "cmpl %eax,(%esp)\n\t"
2802 "je .Li386_equal_true\n\t"
2803 ".Li386_equal_false:\n\t"
2804 "xor %eax,%eax\n\t"
2805 "jmp .Li386_equal_end\n\t"
2806 ".Li386_equal_true:\n\t"
2807 "mov $1,%eax\n\t"
2808 ".Li386_equal_end:\n\t"
2809 "xor %ebx,%ebx\n\t"
2810 "lea 0x8(%esp),%esp");
2811 }
2812
2813 static void
2814 i386_emit_less_signed (void)
2815 {
2816 EMIT_ASM32 (i386_less_signed,
2817 "cmpl %ebx,4(%esp)\n\t"
2818 "jl .Li386_less_signed_true\n\t"
2819 "jne .Li386_less_signed_false\n\t"
2820 "cmpl %eax,(%esp)\n\t"
2821 "jl .Li386_less_signed_true\n\t"
2822 ".Li386_less_signed_false:\n\t"
2823 "xor %eax,%eax\n\t"
2824 "jmp .Li386_less_signed_end\n\t"
2825 ".Li386_less_signed_true:\n\t"
2826 "mov $1,%eax\n\t"
2827 ".Li386_less_signed_end:\n\t"
2828 "xor %ebx,%ebx\n\t"
2829 "lea 0x8(%esp),%esp");
2830 }
2831
2832 static void
2833 i386_emit_less_unsigned (void)
2834 {
2835 EMIT_ASM32 (i386_less_unsigned,
2836 "cmpl %ebx,4(%esp)\n\t"
2837 "jb .Li386_less_unsigned_true\n\t"
2838 "jne .Li386_less_unsigned_false\n\t"
2839 "cmpl %eax,(%esp)\n\t"
2840 "jb .Li386_less_unsigned_true\n\t"
2841 ".Li386_less_unsigned_false:\n\t"
2842 "xor %eax,%eax\n\t"
2843 "jmp .Li386_less_unsigned_end\n\t"
2844 ".Li386_less_unsigned_true:\n\t"
2845 "mov $1,%eax\n\t"
2846 ".Li386_less_unsigned_end:\n\t"
2847 "xor %ebx,%ebx\n\t"
2848 "lea 0x8(%esp),%esp");
2849 }
2850
2851 static void
2852 i386_emit_ref (int size)
2853 {
2854 switch (size)
2855 {
2856 case 1:
2857 EMIT_ASM32 (i386_ref1,
2858 "movb (%eax),%al");
2859 break;
2860 case 2:
2861 EMIT_ASM32 (i386_ref2,
2862 "movw (%eax),%ax");
2863 break;
2864 case 4:
2865 EMIT_ASM32 (i386_ref4,
2866 "movl (%eax),%eax");
2867 break;
2868 case 8:
2869 EMIT_ASM32 (i386_ref8,
2870 "movl 4(%eax),%ebx\n\t"
2871 "movl (%eax),%eax");
2872 break;
2873 }
2874 }
2875
2876 static void
2877 i386_emit_if_goto (int *offset_p, int *size_p)
2878 {
2879 EMIT_ASM32 (i386_if_goto,
2880 "mov %eax,%ecx\n\t"
2881 "or %ebx,%ecx\n\t"
2882 "pop %eax\n\t"
2883 "pop %ebx\n\t"
2884 "cmpl $0,%ecx\n\t"
2885 /* Don't trust the assembler to choose the right jump */
2886 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2887
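     /* Byte accounting: mov (2 bytes), or (2), the two pops (1 each)
        and cmpl $0,%ecx (3) come to 9 bytes, plus the 2-byte jne
        opcode, which is where the offset of 11 comes from.  */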
2888 if (offset_p)
2889 *offset_p = 11; /* be sure that this matches the sequence above */
2890 if (size_p)
2891 *size_p = 4;
2892 }
2893
2894 static void
2895 i386_emit_goto (int *offset_p, int *size_p)
2896 {
2897 EMIT_ASM32 (i386_goto,
2898 /* Don't trust the assembler to choose the right jump */
2899 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2900 if (offset_p)
2901 *offset_p = 1;
2902 if (size_p)
2903 *size_p = 4;
2904 }
2905
2906 static void
2907 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2908 {
2909 int diff = (to - (from + size));
2910 unsigned char buf[sizeof (int)];
2911
2912 /* We're only doing 4-byte sizes at the moment. */
2913 if (size != 4)
2914 {
2915 emit_error = 1;
2916 return;
2917 }
2918
2919 memcpy (buf, &diff, sizeof (int));
2920 write_inferior_memory (from, buf, sizeof (int));
2921 }
2922
2923 static void
2924 i386_emit_const (LONGEST num)
2925 {
2926 unsigned char buf[16];
2927 int i, hi, lo;
2928 CORE_ADDR buildaddr = current_insn_ptr;
2929
2930 i = 0;
2931 buf[i++] = 0xb8; /* mov $<n>,%eax */
2932 lo = num & 0xffffffff;
2933 memcpy (&buf[i], &lo, sizeof (lo));
2934 i += 4;
2935 hi = ((num >> 32) & 0xffffffff);
2936 if (hi)
2937 {
2938 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2939 memcpy (&buf[i], &hi, sizeof (hi));
2940 i += 4;
2941 }
2942 else
2943 {
2944 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2945 }
2946 append_insns (&buildaddr, i, buf);
2947 current_insn_ptr = buildaddr;
2948 }
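/* E.g. num = 0x100000002 emits b8 02 00 00 00 (mov $0x2,%eax) then
   bb 01 00 00 00 (mov $0x1,%ebx); when the constant fits in 32 bits
   the %ebx load collapses to the 2-byte xor.  */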
2949
2950 static void
2951 i386_emit_call (CORE_ADDR fn)
2952 {
2953 unsigned char buf[16];
2954 int i, offset;
2955 CORE_ADDR buildaddr;
2956
2957 buildaddr = current_insn_ptr;
2958 i = 0;
2959 buf[i++] = 0xe8; /* call <reladdr> */
2960 offset = ((int) fn) - (buildaddr + 5);
2961 memcpy (buf + 1, &offset, 4);
2962 append_insns (&buildaddr, 5, buf);
2963 current_insn_ptr = buildaddr;
2964 }
2965
2966 static void
2967 i386_emit_reg (int reg)
2968 {
2969 unsigned char buf[16];
2970 int i;
2971 CORE_ADDR buildaddr;
2972
2973 EMIT_ASM32 (i386_reg_a,
2974 "sub $0x8,%esp");
2975 buildaddr = current_insn_ptr;
2976 i = 0;
2977 buf[i++] = 0xb8; /* mov $<n>,%eax */
2978 memcpy (&buf[i], &reg, sizeof (reg));
2979 i += 4;
2980 append_insns (&buildaddr, i, buf);
2981 current_insn_ptr = buildaddr;
2982 EMIT_ASM32 (i386_reg_b,
2983 "mov %eax,4(%esp)\n\t"
2984 "mov 8(%ebp),%eax\n\t"
2985 "mov %eax,(%esp)");
2986 i386_emit_call (get_raw_reg_func_addr ());
2987 EMIT_ASM32 (i386_reg_c,
2988 "xor %ebx,%ebx\n\t"
2989 "lea 0x8(%esp),%esp");
2990 }
2991
2992 static void
2993 i386_emit_pop (void)
2994 {
2995 EMIT_ASM32 (i386_pop,
2996 "pop %eax\n\t"
2997 "pop %ebx");
2998 }
2999
3000 static void
3001 i386_emit_stack_flush (void)
3002 {
3003 EMIT_ASM32 (i386_stack_flush,
3004 "push %ebx\n\t"
3005 "push %eax");
3006 }
3007
3008 static void
3009 i386_emit_zero_ext (int arg)
3010 {
3011 switch (arg)
3012 {
3013 case 8:
3014 EMIT_ASM32 (i386_zero_ext_8,
3015 "and $0xff,%eax\n\t"
3016 "xor %ebx,%ebx");
3017 break;
3018 case 16:
3019 EMIT_ASM32 (i386_zero_ext_16,
3020 "and $0xffff,%eax\n\t"
3021 "xor %ebx,%ebx");
3022 break;
3023 case 32:
3024 EMIT_ASM32 (i386_zero_ext_32,
3025 "xor %ebx,%ebx");
3026 break;
3027 default:
3028 emit_error = 1;
3029 }
3030 }
3031
3032 static void
3033 i386_emit_swap (void)
3034 {
3035 EMIT_ASM32 (i386_swap,
3036 "mov %eax,%ecx\n\t"
3037 "mov %ebx,%edx\n\t"
3038 "pop %eax\n\t"
3039 "pop %ebx\n\t"
3040 "push %edx\n\t"
3041 "push %ecx");
3042 }
3043
3044 static void
3045 i386_emit_stack_adjust (int n)
3046 {
3047 unsigned char buf[16];
3048 int i;
3049 CORE_ADDR buildaddr = current_insn_ptr;
3050
3051 i = 0;
3052 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3053 buf[i++] = 0x64;
3054 buf[i++] = 0x24;
3055 buf[i++] = n * 8;
3056 append_insns (&buildaddr, i, buf);
3057 current_insn_ptr = buildaddr;
3058 }
3059
3060 /* FN's prototype is `LONGEST(*fn)(int)'. */
3061
3062 static void
3063 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3064 {
3065 unsigned char buf[16];
3066 int i;
3067 CORE_ADDR buildaddr;
3068
3069 EMIT_ASM32 (i386_int_call_1_a,
3070 /* Reserve a bit of stack space. */
3071 "sub $0x8,%esp");
3072 /* Put the one argument on the stack. */
3073 buildaddr = current_insn_ptr;
3074 i = 0;
3075 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3076 buf[i++] = 0x04;
3077 buf[i++] = 0x24;
3078 memcpy (&buf[i], &arg1, sizeof (arg1));
3079 i += 4;
3080 append_insns (&buildaddr, i, buf);
3081 current_insn_ptr = buildaddr;
3082 i386_emit_call (fn);
3083 EMIT_ASM32 (i386_int_call_1_c,
3084 "mov %edx,%ebx\n\t"
3085 "lea 0x8(%esp),%esp");
3086 }
3087
3088 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3089
3090 static void
3091 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3092 {
3093 unsigned char buf[16];
3094 int i;
3095 CORE_ADDR buildaddr;
3096
3097 EMIT_ASM32 (i386_void_call_2_a,
3098 /* Preserve %eax only; we don't have to worry about %ebx. */
3099 "push %eax\n\t"
3100 /* Reserve a bit of stack space for arguments. */
3101 "sub $0x10,%esp\n\t"
3102 /* Copy "top" to the second argument position. (Note that
3103 we can't assume the function won't scribble on its
3104 arguments, so don't try to restore from this.) */
3105 "mov %eax,4(%esp)\n\t"
3106 "mov %ebx,8(%esp)");
3107 /* Put the first argument on the stack. */
3108 buildaddr = current_insn_ptr;
3109 i = 0;
3110 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3111 buf[i++] = 0x04;
3112 buf[i++] = 0x24;
3113 memcpy (&buf[i], &arg1, sizeof (arg1));
3114 i += 4;
3115 append_insns (&buildaddr, i, buf);
3116 current_insn_ptr = buildaddr;
3117 i386_emit_call (fn);
3118 EMIT_ASM32 (i386_void_call_2_b,
3119 "lea 0x10(%esp),%esp\n\t"
3120 /* Restore original stack top. */
3121 "pop %eax");
3122 }
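/* Unlike the amd64 flavor above, this follows the i386 cdecl
   convention: all arguments travel on the stack, ARG1 at (%esp) and
   the two halves of the LONGEST at 4(%esp) and 8(%esp).  */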
3123
3124
3125 void
3126 i386_emit_eq_goto (int *offset_p, int *size_p)
3127 {
3128 EMIT_ASM32 (eq,
3129 /* Check low half first, more likely to be decider */
3130 "cmpl %eax,(%esp)\n\t"
3131 "jne .Leq_fallthru\n\t"
3132 "cmpl %ebx,4(%esp)\n\t"
3133 "jne .Leq_fallthru\n\t"
3134 "lea 0x8(%esp),%esp\n\t"
3135 "pop %eax\n\t"
3136 "pop %ebx\n\t"
3137 /* jmp, but don't trust the assembler to choose the right jump */
3138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3139 ".Leq_fallthru:\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3141 "pop %eax\n\t"
3142 "pop %ebx");
3143
3144 if (offset_p)
3145 *offset_p = 18;
3146 if (size_p)
3147 *size_p = 4;
3148 }
3149
3150 void
3151 i386_emit_ne_goto (int *offset_p, int *size_p)
3152 {
3153 EMIT_ASM32 (ne,
3154 /* Check low half first, more likely to be decider */
3155 "cmpl %eax,(%esp)\n\t"
3156 "jne .Lne_jump\n\t"
3157 "cmpl %ebx,4(%esp)\n\t"
3158 "je .Lne_fallthru\n\t"
3159 ".Lne_jump:\n\t"
3160 "lea 0x8(%esp),%esp\n\t"
3161 "pop %eax\n\t"
3162 "pop %ebx\n\t"
3163 /* jmp, but don't trust the assembler to choose the right jump */
3164 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3165 ".Lne_fallthru:\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3167 "pop %eax\n\t"
3168 "pop %ebx");
3169
3170 if (offset_p)
3171 *offset_p = 18;
3172 if (size_p)
3173 *size_p = 4;
3174 }
3175
3176 void
3177 i386_emit_lt_goto (int *offset_p, int *size_p)
3178 {
3179 EMIT_ASM32 (lt,
3180 "cmpl %ebx,4(%esp)\n\t"
3181 "jl .Llt_jump\n\t"
3182 "jne .Llt_fallthru\n\t"
3183 "cmpl %eax,(%esp)\n\t"
3184 "jnl .Llt_fallthru\n\t"
3185 ".Llt_jump:\n\t"
3186 "lea 0x8(%esp),%esp\n\t"
3187 "pop %eax\n\t"
3188 "pop %ebx\n\t"
3189 /* jmp, but don't trust the assembler to choose the right jump */
3190 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3191 ".Llt_fallthru:\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
3193 "pop %eax\n\t"
3194 "pop %ebx");
3195
3196 if (offset_p)
3197 *offset_p = 20;
3198 if (size_p)
3199 *size_p = 4;
3200 }
3201
3202 void
3203 i386_emit_le_goto (int *offset_p, int *size_p)
3204 {
3205 EMIT_ASM32 (le,
3206 "cmpl %ebx,4(%esp)\n\t"
3207 "jle .Lle_jump\n\t"
3208 "jne .Lle_fallthru\n\t"
3209 "cmpl %eax,(%esp)\n\t"
3210 "jnle .Lle_fallthru\n\t"
3211 ".Lle_jump:\n\t"
3212 "lea 0x8(%esp),%esp\n\t"
3213 "pop %eax\n\t"
3214 "pop %ebx\n\t"
3215 /* jmp, but don't trust the assembler to choose the right jump */
3216 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3217 ".Lle_fallthru:\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3219 "pop %eax\n\t"
3220 "pop %ebx");
3221
3222 if (offset_p)
3223 *offset_p = 20;
3224 if (size_p)
3225 *size_p = 4;
3226 }
3227
3228 void
3229 i386_emit_gt_goto (int *offset_p, int *size_p)
3230 {
3231 EMIT_ASM32 (gt,
3232 "cmpl %ebx,4(%esp)\n\t"
3233 "jg .Lgt_jump\n\t"
3234 "jne .Lgt_fallthru\n\t"
3235 "cmpl %eax,(%esp)\n\t"
3236 "jng .Lgt_fallthru\n\t"
3237 ".Lgt_jump:\n\t"
3238 "lea 0x8(%esp),%esp\n\t"
3239 "pop %eax\n\t"
3240 "pop %ebx\n\t"
3241 /* jmp, but don't trust the assembler to choose the right jump */
3242 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3243 ".Lgt_fallthru:\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3245 "pop %eax\n\t"
3246 "pop %ebx");
3247
3248 if (offset_p)
3249 *offset_p = 20;
3250 if (size_p)
3251 *size_p = 4;
3252 }
3253
3254 void
3255 i386_emit_ge_goto (int *offset_p, int *size_p)
3256 {
3257 EMIT_ASM32 (ge,
3258 "cmpl %ebx,4(%esp)\n\t"
3259 "jge .Lge_jump\n\t"
3260 "jne .Lge_fallthru\n\t"
3261 "cmpl %eax,(%esp)\n\t"
3262 "jnge .Lge_fallthru\n\t"
3263 ".Lge_jump:\n\t"
3264 "lea 0x8(%esp),%esp\n\t"
3265 "pop %eax\n\t"
3266 "pop %ebx\n\t"
3267 /* jmp, but don't trust the assembler to choose the right jump */
3268 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3269 ".Lge_fallthru:\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3271 "pop %eax\n\t"
3272 "pop %ebx");
3273
3274 if (offset_p)
3275 *offset_p = 20;
3276 if (size_p)
3277 *size_p = 4;
3278 }
3279
3280 struct emit_ops i386_emit_ops =
3281 {
3282 i386_emit_prologue,
3283 i386_emit_epilogue,
3284 i386_emit_add,
3285 i386_emit_sub,
3286 i386_emit_mul,
3287 i386_emit_lsh,
3288 i386_emit_rsh_signed,
3289 i386_emit_rsh_unsigned,
3290 i386_emit_ext,
3291 i386_emit_log_not,
3292 i386_emit_bit_and,
3293 i386_emit_bit_or,
3294 i386_emit_bit_xor,
3295 i386_emit_bit_not,
3296 i386_emit_equal,
3297 i386_emit_less_signed,
3298 i386_emit_less_unsigned,
3299 i386_emit_ref,
3300 i386_emit_if_goto,
3301 i386_emit_goto,
3302 i386_write_goto_address,
3303 i386_emit_const,
3304 i386_emit_call,
3305 i386_emit_reg,
3306 i386_emit_pop,
3307 i386_emit_stack_flush,
3308 i386_emit_zero_ext,
3309 i386_emit_swap,
3310 i386_emit_stack_adjust,
3311 i386_emit_int_call_1,
3312 i386_emit_void_call_2,
3313 i386_emit_eq_goto,
3314 i386_emit_ne_goto,
3315 i386_emit_lt_goto,
3316 i386_emit_le_goto,
3317 i386_emit_gt_goto,
3318 i386_emit_ge_goto
3319 };
3320
3321
3322 static struct emit_ops *
3323 x86_emit_ops (void)
3324 {
3325 #ifdef __x86_64__
3326 if (is_64bit_tdesc ())
3327 return &amd64_emit_ops;
3328 else
3329 #endif
3330 return &i386_emit_ops;
3331 }
3332
3333 static int
3334 x86_supports_range_stepping (void)
3335 {
3336 return 1;
3337 }
3338
3339 /* This is initialized assuming an amd64 target.
3340 x86_arch_setup will adjust it to match the actual target. */
3341
3342 struct linux_target_ops the_low_target =
3343 {
3344 x86_arch_setup,
3345 x86_linux_regs_info,
3346 x86_cannot_fetch_register,
3347 x86_cannot_store_register,
3348 NULL, /* fetch_register */
3349 x86_get_pc,
3350 x86_set_pc,
3351 x86_breakpoint,
3352 x86_breakpoint_len,
3353 NULL,
3354 1,
3355 x86_breakpoint_at,
3356 x86_insert_point,
3357 x86_remove_point,
3358 x86_stopped_by_watchpoint,
3359 x86_stopped_data_address,
3360 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3361 native i386 case (no registers smaller than an xfer unit), and are not
3362 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3363 NULL,
3364 NULL,
3365 /* need to fix up i386 siginfo if host is amd64 */
3366 x86_siginfo_fixup,
3367 x86_linux_new_process,
3368 x86_linux_new_thread,
3369 x86_linux_prepare_to_resume,
3370 x86_linux_process_qsupported,
3371 x86_supports_tracepoints,
3372 x86_get_thread_area,
3373 x86_install_fast_tracepoint_jump_pad,
3374 x86_emit_ops,
3375 x86_get_min_fast_tracepoint_insn_len,
3376 x86_supports_range_stepping,
3377 };
3378
3379 void
3380 initialize_low_arch (void)
3381 {
3382 /* Initialize the Linux target descriptions. */
3383 #ifdef __x86_64__
3384 init_registers_amd64_linux ();
3385 init_registers_amd64_avx_linux ();
3386 init_registers_amd64_mpx_linux ();
3387
3388 init_registers_x32_linux ();
3389 init_registers_x32_avx_linux ();
3390
3391 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3392 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3393 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3394 #endif
3395 init_registers_i386_linux ();
3396 init_registers_i386_mmx_linux ();
3397 init_registers_i386_avx_linux ();
3398 init_registers_i386_mpx_linux ();
3399
3400 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3401 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3402 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3403
3404 initialize_regsets_info (&x86_regsets_info);
3405 }