/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

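/* Templates for the jumps written into the inferior: 0xe9 is `jmp
   rel32' (five bytes) and 0x66 0xe9 is `jmp rel16' with an
   operand-size prefix (four bytes).  The zeroed displacement bytes
   are patched in when a jump pad or trampoline is wired up.  */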
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

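/* Fallback definitions of the regset ptrace requests, for hosts whose
   headers predate them; the values below are the ones the Linux
   kernel uses.  */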
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,  /* MPX registers BND0 ... BND3.  */
  -1, -1           /* MPX registers BNDCFGU, BNDSTATUS.  */
};
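/* The -1 entries above mark registers that are not part of the
   general-purpose regset; the floating-point and xstate regsets
   supply them instead, so the gregset fill/store routines below
   simply skip those slots.  */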

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif


/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

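    /* PTRACE_GET_THREAD_AREA fills in a struct user_desc; desc[1] is
       its base_addr field, the 32-bit base of the thread's segment.  */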
    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

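    /* A segment selector is (index << 3) | TI | RPL, so shifting the
       GS value right by three strips the table-indicator and
       privilege-level bits, leaving the GDT entry index.  */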
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

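/* The single-byte INT3 instruction (opcode 0xCC) serves as the
   software breakpoint on both x86 and x86-64.  */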
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp;
         here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_inferior);

  /* DR6 and DR7 are retrieved by other means.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the
   inferior.  */

unsigned
i386_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded; otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  The same memory layout is used for the coredump NT_X86_XSTATE note
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..511] hold the OS-enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
  ;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;
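/* Both capability flags above are tri-state: -1 means not yet known,
   0 means probed and unavailable, 1 means probed and available.  The
   probing happens in x86_linux_read_description below.  */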

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = I386_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case I386_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case I386_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & I386_XSTATE_ALL_MASK)
            {
            case (I386_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (I386_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_inferior
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

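/* Write the LEN bytes in BUF to the inferior at *TO, advancing *TO
   past the written bytes.  */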
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

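/* Parse the whitespace-separated hex byte string OP (e.g. "48 89 e6")
   into raw bytes in BUF, returning the number of bytes written.  */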
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
2052
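/* For example, a use such as EMIT_ASM (amd64_pop, "pop %rax") expands
   to (roughly):

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
              "\t" "start_amd64_pop:" "\t" "pop %rax" "\n"
              "\t" "end_amd64_pop:");

   The leading jmp guarantees the template bytes are never executed
   inside gdbserver itself; they are only copied, via add_insns, into
   the inferior's jump pad.  */
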
2053 #ifdef __x86_64__
2054
2055 #define EMIT_ASM32(NAME,INSNS) \
2056 do \
2057 { \
2058 extern unsigned char start_ ## NAME, end_ ## NAME; \
2059 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2060 __asm__ (".code32\n" \
2061 "\t" "jmp end_" #NAME "\n" \
2062 "\t" "start_" #NAME ":\n" \
2063 "\t" INSNS "\n" \
2064 "\t" "end_" #NAME ":\n" \
2065 ".code64\n"); \
2066 } while (0)
2067
2068 #else
2069
2070 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2071
2072 #endif
2073
2074 #ifdef __x86_64__
2075
2076 static void
2077 amd64_emit_prologue (void)
2078 {
2079 EMIT_ASM (amd64_prologue,
2080 "pushq %rbp\n\t"
2081 "movq %rsp,%rbp\n\t"
2082 "sub $0x20,%rsp\n\t"
2083 "movq %rdi,-8(%rbp)\n\t"
2084 "movq %rsi,-16(%rbp)");
2085 }
2086
2087
2088 static void
2089 amd64_emit_epilogue (void)
2090 {
2091 EMIT_ASM (amd64_epilogue,
2092 "movq -16(%rbp),%rdi\n\t"
2093 "movq %rax,(%rdi)\n\t"
2094 "xor %rax,%rax\n\t"
2095 "leave\n\t"
2096 "ret");
2097 }
2098
2099 static void
2100 amd64_emit_add (void)
2101 {
2102 EMIT_ASM (amd64_add,
2103 "add (%rsp),%rax\n\t"
2104 "lea 0x8(%rsp),%rsp");
2105 }
2106
2107 static void
2108 amd64_emit_sub (void)
2109 {
2110 EMIT_ASM (amd64_sub,
2111 "sub %rax,(%rsp)\n\t"
2112 "pop %rax");
2113 }
2114
2115 static void
2116 amd64_emit_mul (void)
2117 {
2118 emit_error = 1;
2119 }
2120
2121 static void
2122 amd64_emit_lsh (void)
2123 {
2124 emit_error = 1;
2125 }
2126
2127 static void
2128 amd64_emit_rsh_signed (void)
2129 {
2130 emit_error = 1;
2131 }
2132
2133 static void
2134 amd64_emit_rsh_unsigned (void)
2135 {
2136 emit_error = 1;
2137 }
2138
2139 static void
2140 amd64_emit_ext (int arg)
2141 {
2142 switch (arg)
2143 {
2144 case 8:
2145 EMIT_ASM (amd64_ext_8,
2146 "cbtw\n\t"
2147 "cwtl\n\t"
2148 "cltq");
2149 break;
2150 case 16:
2151 EMIT_ASM (amd64_ext_16,
2152 "cwtl\n\t"
2153 "cltq");
2154 break;
2155 case 32:
2156 EMIT_ASM (amd64_ext_32,
2157 "cltq");
2158 break;
2159 default:
2160 emit_error = 1;
2161 }
2162 }
2163
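/* A worked example for the 8-bit case above: with 0x80 in %al, the
   cbtw/cwtl/cltq chain sign-extends %al -> %ax -> %eax -> %rax,
   leaving %rax = 0xffffffffffffff80, i.e. the signed value -128.  */
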
2164 static void
2165 amd64_emit_log_not (void)
2166 {
2167 EMIT_ASM (amd64_log_not,
2168 "test %rax,%rax\n\t"
2169 "sete %cl\n\t"
2170 "movzbq %cl,%rax");
2171 }
2172
2173 static void
2174 amd64_emit_bit_and (void)
2175 {
2176 EMIT_ASM (amd64_and,
2177 "and (%rsp),%rax\n\t"
2178 "lea 0x8(%rsp),%rsp");
2179 }
2180
2181 static void
2182 amd64_emit_bit_or (void)
2183 {
2184 EMIT_ASM (amd64_or,
2185 "or (%rsp),%rax\n\t"
2186 "lea 0x8(%rsp),%rsp");
2187 }
2188
2189 static void
2190 amd64_emit_bit_xor (void)
2191 {
2192 EMIT_ASM (amd64_xor,
2193 "xor (%rsp),%rax\n\t"
2194 "lea 0x8(%rsp),%rsp");
2195 }
2196
2197 static void
2198 amd64_emit_bit_not (void)
2199 {
2200 EMIT_ASM (amd64_bit_not,
2201 "xorq $0xffffffffffffffff,%rax");
2202 }
2203
2204 static void
2205 amd64_emit_equal (void)
2206 {
2207 EMIT_ASM (amd64_equal,
2208 "cmp %rax,(%rsp)\n\t"
2209 "je .Lamd64_equal_true\n\t"
2210 "xor %rax,%rax\n\t"
2211 "jmp .Lamd64_equal_end\n\t"
2212 ".Lamd64_equal_true:\n\t"
2213 "mov $0x1,%rax\n\t"
2214 ".Lamd64_equal_end:\n\t"
2215 "lea 0x8(%rsp),%rsp");
2216 }
2217
2218 static void
2219 amd64_emit_less_signed (void)
2220 {
2221 EMIT_ASM (amd64_less_signed,
2222 "cmp %rax,(%rsp)\n\t"
2223 "jl .Lamd64_less_signed_true\n\t"
2224 "xor %rax,%rax\n\t"
2225 "jmp .Lamd64_less_signed_end\n\t"
2226 ".Lamd64_less_signed_true:\n\t"
2227 "mov $1,%rax\n\t"
2228 ".Lamd64_less_signed_end:\n\t"
2229 "lea 0x8(%rsp),%rsp");
2230 }
2231
2232 static void
2233 amd64_emit_less_unsigned (void)
2234 {
2235 EMIT_ASM (amd64_less_unsigned,
2236 "cmp %rax,(%rsp)\n\t"
2237 "jb .Lamd64_less_unsigned_true\n\t"
2238 "xor %rax,%rax\n\t"
2239 "jmp .Lamd64_less_unsigned_end\n\t"
2240 ".Lamd64_less_unsigned_true:\n\t"
2241 "mov $1,%rax\n\t"
2242 ".Lamd64_less_unsigned_end:\n\t"
2243 "lea 0x8(%rsp),%rsp");
2244 }
2245
2246 static void
2247 amd64_emit_ref (int size)
2248 {
2249 switch (size)
2250 {
2251 case 1:
2252 EMIT_ASM (amd64_ref1,
2253 "movb (%rax),%al");
2254 break;
2255 case 2:
2256 EMIT_ASM (amd64_ref2,
2257 "movw (%rax),%ax");
2258 break;
2259 case 4:
2260 EMIT_ASM (amd64_ref4,
2261 "movl (%rax),%eax");
2262 break;
2263 case 8:
2264 EMIT_ASM (amd64_ref8,
2265 "movq (%rax),%rax");
2266 break;
2267 }
2268 }
2269
2270 static void
2271 amd64_emit_if_goto (int *offset_p, int *size_p)
2272 {
2273 EMIT_ASM (amd64_if_goto,
2274 "mov %rax,%rcx\n\t"
2275 "pop %rax\n\t"
2276 "cmp $0,%rcx\n\t"
2277 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2278 if (offset_p)
2279 *offset_p = 10;
2280 if (size_p)
2281 *size_p = 4;
2282 }
2283
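/* The constants just stored come from the byte layout of the emitted
   sequence: mov %rax,%rcx is 3 bytes, pop %rax is 1 and cmp $0,%rcx is
   4, so the 0x0f 0x85 (jne rel32) opcode starts at offset 8 and its
   4-byte displacement, later patched by amd64_write_goto_address,
   at offset 10.  */
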
2284 static void
2285 amd64_emit_goto (int *offset_p, int *size_p)
2286 {
2287 EMIT_ASM (amd64_goto,
2288 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2289 if (offset_p)
2290 *offset_p = 1;
2291 if (size_p)
2292 *size_p = 4;
2293 }
2294
2295 static void
2296 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2297 {
2298 int diff = (to - (from + size));
2299 unsigned char buf[sizeof (int)];
2300
2301 if (size != 4)
2302 {
2303 emit_error = 1;
2304 return;
2305 }
2306
2307 memcpy (buf, &diff, sizeof (int));
2308 write_inferior_memory (from, buf, sizeof (int));
2309 }
2310
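/* A worked example of the displacement math above: to make a jump
   whose 4-byte operand lives at from = 0x1000 land on to = 0x1020, we
   store diff = 0x1020 - (0x1000 + 4) = 0x1c, because the CPU adds the
   displacement to the address of the first byte after the operand.  */
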
2311 static void
2312 amd64_emit_const (LONGEST num)
2313 {
2314 unsigned char buf[16];
2315 int i;
2316 CORE_ADDR buildaddr = current_insn_ptr;
2317
2318 i = 0;
2319 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2320 memcpy (&buf[i], &num, sizeof (num));
2321 i += 8;
2322 append_insns (&buildaddr, i, buf);
2323 current_insn_ptr = buildaddr;
2324 }
2325
2326 static void
2327 amd64_emit_call (CORE_ADDR fn)
2328 {
2329 unsigned char buf[16];
2330 int i;
2331 CORE_ADDR buildaddr;
2332 LONGEST offset64;
2333
2334 	  /* The destination function, being in a shared library, may be
2335 	     more than 31 bits away from the compiled code pad.  */
2336
2337 buildaddr = current_insn_ptr;
2338
2339 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2340
2341 i = 0;
2342
2343 if (offset64 > INT_MAX || offset64 < INT_MIN)
2344 {
2345 	      /* Offset is too large for a direct call.  Load the address
2346 	         into a register and call through it instead; pick a
2347 	         call-clobbered register that is dead here, so we don't have
2348 	         to push/pop it.  The bytes below use %rdx.  */
2349 	      buf[i++] = 0x48; /* mov $fn,%rdx */
2350 	      buf[i++] = 0xba;
2351 	      memcpy (buf + i, &fn, 8);
2352 	      i += 8;
2353 	      buf[i++] = 0xff; /* callq *%rdx */
2354 	      buf[i++] = 0xd2;
2354 }
2355 else
2356 {
2357 	      int offset32 = offset64; /* we know we can't overflow here.  */
	      buf[i++] = 0xe8; /* call <reladdr> */
2358 	      memcpy (buf + i, &offset32, 4);
2359 	      i += 4;
2360 }
2361
2362 append_insns (&buildaddr, i, buf);
2363 current_insn_ptr = buildaddr;
2364 }
2365
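/* The two encodings emitted above, for reference:

     e8 <rel32>              call   (target within +/- 2 GB)
     48 ba <imm64>  ff d2    movabs $fn,%rdx; callq *%rdx

   The indirect form costs 12 bytes instead of 5, but it reaches any
   64-bit address.  */
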
2366 static void
2367 amd64_emit_reg (int reg)
2368 {
2369 unsigned char buf[16];
2370 int i;
2371 CORE_ADDR buildaddr;
2372
2373 /* Assume raw_regs is still in %rdi. */
2374 buildaddr = current_insn_ptr;
2375 i = 0;
2376 buf[i++] = 0xbe; /* mov $<n>,%esi */
2377 memcpy (&buf[i], &reg, sizeof (reg));
2378 i += 4;
2379 append_insns (&buildaddr, i, buf);
2380 current_insn_ptr = buildaddr;
2381 amd64_emit_call (get_raw_reg_func_addr ());
2382 }
2383
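/* The sequence above follows the System V AMD64 calling convention:
   the raw register block is still in %rdi (first argument) and the
   register number is loaded into %esi (second argument), matching an
   IPA-side callee of roughly the shape

     ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);

   (shown here for orientation only).  */
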
2384 static void
2385 amd64_emit_pop (void)
2386 {
2387 EMIT_ASM (amd64_pop,
2388 "pop %rax");
2389 }
2390
2391 static void
2392 amd64_emit_stack_flush (void)
2393 {
2394 EMIT_ASM (amd64_stack_flush,
2395 "push %rax");
2396 }
2397
2398 static void
2399 amd64_emit_zero_ext (int arg)
2400 {
2401 switch (arg)
2402 {
2403 case 8:
2404 EMIT_ASM (amd64_zero_ext_8,
2405 "and $0xff,%rax");
2406 break;
2407 case 16:
2408 EMIT_ASM (amd64_zero_ext_16,
2409 "and $0xffff,%rax");
2410 break;
2411 case 32:
2412 EMIT_ASM (amd64_zero_ext_32,
2413 "mov $0xffffffff,%rcx\n\t"
2414 "and %rcx,%rax");
2415 break;
2416 default:
2417 emit_error = 1;
2418 }
2419 }
2420
2421 static void
2422 amd64_emit_swap (void)
2423 {
2424 EMIT_ASM (amd64_swap,
2425 "mov %rax,%rcx\n\t"
2426 "pop %rax\n\t"
2427 "push %rcx");
2428 }
2429
2430 static void
2431 amd64_emit_stack_adjust (int n)
2432 {
2433 unsigned char buf[16];
2434 int i;
2435 CORE_ADDR buildaddr = current_insn_ptr;
2436
2437 i = 0;
2438 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2439 buf[i++] = 0x8d;
2440 buf[i++] = 0x64;
2441 buf[i++] = 0x24;
2442 	  /* The 8-bit displacement only handles adjustments up to 15 slots
	     (n * 8 must fit in a signed byte), but we don't expect any more.  */
2443 buf[i++] = n * 8;
2444 append_insns (&buildaddr, i, buf);
2445 current_insn_ptr = buildaddr;
2446 }
2447
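/* For instance, amd64_emit_stack_adjust (2) emits the five bytes
   48 8d 64 24 10, i.e. lea 0x10(%rsp),%rsp, discarding two 8-byte
   stack slots.  Unlike an add, lea leaves the flags untouched.  */
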
2448 /* FN's prototype is `LONGEST(*fn)(int)'. */
2449
2450 static void
2451 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2452 {
2453 unsigned char buf[16];
2454 int i;
2455 CORE_ADDR buildaddr;
2456
2457 buildaddr = current_insn_ptr;
2458 i = 0;
2459 buf[i++] = 0xbf; /* movl $<n>,%edi */
2460 memcpy (&buf[i], &arg1, sizeof (arg1));
2461 i += 4;
2462 append_insns (&buildaddr, i, buf);
2463 current_insn_ptr = buildaddr;
2464 amd64_emit_call (fn);
2465 }
2466
2467 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2468
2469 static void
2470 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2471 {
2472 unsigned char buf[16];
2473 int i;
2474 CORE_ADDR buildaddr;
2475
2476 buildaddr = current_insn_ptr;
2477 i = 0;
2478 buf[i++] = 0xbf; /* movl $<n>,%edi */
2479 memcpy (&buf[i], &arg1, sizeof (arg1));
2480 i += 4;
2481 append_insns (&buildaddr, i, buf);
2482 current_insn_ptr = buildaddr;
2483 EMIT_ASM (amd64_void_call_2_a,
2484 /* Save away a copy of the stack top. */
2485 "push %rax\n\t"
2486 /* Also pass top as the second argument. */
2487 "mov %rax,%rsi");
2488 amd64_emit_call (fn);
2489 EMIT_ASM (amd64_void_call_2_b,
2490 	    /* Restore the stack top; %rax may have been trashed by the call.  */
2491 "pop %rax");
2492 }
2493
2494 void
2495 amd64_emit_eq_goto (int *offset_p, int *size_p)
2496 {
2497 EMIT_ASM (amd64_eq,
2498 "cmp %rax,(%rsp)\n\t"
2499 "jne .Lamd64_eq_fallthru\n\t"
2500 "lea 0x8(%rsp),%rsp\n\t"
2501 "pop %rax\n\t"
2502 /* jmp, but don't trust the assembler to choose the right jump */
2503 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2504 ".Lamd64_eq_fallthru:\n\t"
2505 "lea 0x8(%rsp),%rsp\n\t"
2506 "pop %rax");
2507
2508 if (offset_p)
2509 *offset_p = 13;
2510 if (size_p)
2511 *size_p = 4;
2512 }
2513
2514 void
2515 amd64_emit_ne_goto (int *offset_p, int *size_p)
2516 {
2517 EMIT_ASM (amd64_ne,
2518 "cmp %rax,(%rsp)\n\t"
2519 "je .Lamd64_ne_fallthru\n\t"
2520 "lea 0x8(%rsp),%rsp\n\t"
2521 "pop %rax\n\t"
2522 /* jmp, but don't trust the assembler to choose the right jump */
2523 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2524 ".Lamd64_ne_fallthru:\n\t"
2525 "lea 0x8(%rsp),%rsp\n\t"
2526 "pop %rax");
2527
2528 if (offset_p)
2529 *offset_p = 13;
2530 if (size_p)
2531 *size_p = 4;
2532 }
2533
2534 void
2535 amd64_emit_lt_goto (int *offset_p, int *size_p)
2536 {
2537 EMIT_ASM (amd64_lt,
2538 "cmp %rax,(%rsp)\n\t"
2539 "jnl .Lamd64_lt_fallthru\n\t"
2540 "lea 0x8(%rsp),%rsp\n\t"
2541 "pop %rax\n\t"
2542 /* jmp, but don't trust the assembler to choose the right jump */
2543 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2544 ".Lamd64_lt_fallthru:\n\t"
2545 "lea 0x8(%rsp),%rsp\n\t"
2546 "pop %rax");
2547
2548 if (offset_p)
2549 *offset_p = 13;
2550 if (size_p)
2551 *size_p = 4;
2552 }
2553
2554 void
2555 amd64_emit_le_goto (int *offset_p, int *size_p)
2556 {
2557 EMIT_ASM (amd64_le,
2558 "cmp %rax,(%rsp)\n\t"
2559 "jnle .Lamd64_le_fallthru\n\t"
2560 "lea 0x8(%rsp),%rsp\n\t"
2561 "pop %rax\n\t"
2562 /* jmp, but don't trust the assembler to choose the right jump */
2563 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2564 ".Lamd64_le_fallthru:\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax");
2567
2568 if (offset_p)
2569 *offset_p = 13;
2570 if (size_p)
2571 *size_p = 4;
2572 }
2573
2574 void
2575 amd64_emit_gt_goto (int *offset_p, int *size_p)
2576 {
2577 EMIT_ASM (amd64_gt,
2578 "cmp %rax,(%rsp)\n\t"
2579 "jng .Lamd64_gt_fallthru\n\t"
2580 "lea 0x8(%rsp),%rsp\n\t"
2581 "pop %rax\n\t"
2582 /* jmp, but don't trust the assembler to choose the right jump */
2583 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2584 ".Lamd64_gt_fallthru:\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax");
2587
2588 if (offset_p)
2589 *offset_p = 13;
2590 if (size_p)
2591 *size_p = 4;
2592 }
2593
2594 void
2595 amd64_emit_ge_goto (int *offset_p, int *size_p)
2596 {
2597 EMIT_ASM (amd64_ge,
2598 "cmp %rax,(%rsp)\n\t"
2599 "jnge .Lamd64_ge_fallthru\n\t"
2600 ".Lamd64_ge_jump:\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2602 "pop %rax\n\t"
2603 /* jmp, but don't trust the assembler to choose the right jump */
2604 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2605 ".Lamd64_ge_fallthru:\n\t"
2606 "lea 0x8(%rsp),%rsp\n\t"
2607 "pop %rax");
2608
2609 if (offset_p)
2610 *offset_p = 13;
2611 if (size_p)
2612 *size_p = 4;
2613 }
2614
2615 struct emit_ops amd64_emit_ops =
2616 {
2617 amd64_emit_prologue,
2618 amd64_emit_epilogue,
2619 amd64_emit_add,
2620 amd64_emit_sub,
2621 amd64_emit_mul,
2622 amd64_emit_lsh,
2623 amd64_emit_rsh_signed,
2624 amd64_emit_rsh_unsigned,
2625 amd64_emit_ext,
2626 amd64_emit_log_not,
2627 amd64_emit_bit_and,
2628 amd64_emit_bit_or,
2629 amd64_emit_bit_xor,
2630 amd64_emit_bit_not,
2631 amd64_emit_equal,
2632 amd64_emit_less_signed,
2633 amd64_emit_less_unsigned,
2634 amd64_emit_ref,
2635 amd64_emit_if_goto,
2636 amd64_emit_goto,
2637 amd64_write_goto_address,
2638 amd64_emit_const,
2639 amd64_emit_call,
2640 amd64_emit_reg,
2641 amd64_emit_pop,
2642 amd64_emit_stack_flush,
2643 amd64_emit_zero_ext,
2644 amd64_emit_swap,
2645 amd64_emit_stack_adjust,
2646 amd64_emit_int_call_1,
2647 amd64_emit_void_call_2,
2648 amd64_emit_eq_goto,
2649 amd64_emit_ne_goto,
2650 amd64_emit_lt_goto,
2651 amd64_emit_le_goto,
2652 amd64_emit_gt_goto,
2653 amd64_emit_ge_goto
2654 };
2655
2656 #endif /* __x86_64__ */
2657
2658 static void
2659 i386_emit_prologue (void)
2660 {
2661 EMIT_ASM32 (i386_prologue,
2662 "push %ebp\n\t"
2663 "mov %esp,%ebp\n\t"
2664 "push %ebx");
2665 /* At this point, the raw regs base address is at 8(%ebp), and the
2666 value pointer is at 12(%ebp). */
2667 }
2668
2669 static void
2670 i386_emit_epilogue (void)
2671 {
2672 EMIT_ASM32 (i386_epilogue,
2673 "mov 12(%ebp),%ecx\n\t"
2674 "mov %eax,(%ecx)\n\t"
2675 "mov %ebx,0x4(%ecx)\n\t"
2676 "xor %eax,%eax\n\t"
2677 "pop %ebx\n\t"
2678 "pop %ebp\n\t"
2679 "ret");
2680 }
2681
2682 static void
2683 i386_emit_add (void)
2684 {
2685 EMIT_ASM32 (i386_add,
2686 "add (%esp),%eax\n\t"
2687 "adc 0x4(%esp),%ebx\n\t"
2688 "lea 0x8(%esp),%esp");
2689 }
2690
2691 static void
2692 i386_emit_sub (void)
2693 {
2694 EMIT_ASM32 (i386_sub,
2695 "subl %eax,(%esp)\n\t"
2696 "sbbl %ebx,4(%esp)\n\t"
2697 "pop %eax\n\t"
2698 "pop %ebx\n\t");
2699 }
2700
2701 static void
2702 i386_emit_mul (void)
2703 {
2704 emit_error = 1;
2705 }
2706
2707 static void
2708 i386_emit_lsh (void)
2709 {
2710 emit_error = 1;
2711 }
2712
2713 static void
2714 i386_emit_rsh_signed (void)
2715 {
2716 emit_error = 1;
2717 }
2718
2719 static void
2720 i386_emit_rsh_unsigned (void)
2721 {
2722 emit_error = 1;
2723 }
2724
2725 static void
2726 i386_emit_ext (int arg)
2727 {
2728 switch (arg)
2729 {
2730 case 8:
2731 EMIT_ASM32 (i386_ext_8,
2732 "cbtw\n\t"
2733 "cwtl\n\t"
2734 "movl %eax,%ebx\n\t"
2735 "sarl $31,%ebx");
2736 break;
2737 case 16:
2738 EMIT_ASM32 (i386_ext_16,
2739 "cwtl\n\t"
2740 "movl %eax,%ebx\n\t"
2741 "sarl $31,%ebx");
2742 break;
2743 case 32:
2744 EMIT_ASM32 (i386_ext_32,
2745 "movl %eax,%ebx\n\t"
2746 "sarl $31,%ebx");
2747 break;
2748 default:
2749 emit_error = 1;
2750 }
2751 }
2752
2753 static void
2754 i386_emit_log_not (void)
2755 {
2756 EMIT_ASM32 (i386_log_not,
2757 "or %ebx,%eax\n\t"
2758 "test %eax,%eax\n\t"
2759 "sete %cl\n\t"
2760 "xor %ebx,%ebx\n\t"
2761 "movzbl %cl,%eax");
2762 }
2763
2764 static void
2765 i386_emit_bit_and (void)
2766 {
2767 EMIT_ASM32 (i386_and,
2768 "and (%esp),%eax\n\t"
2769 "and 0x4(%esp),%ebx\n\t"
2770 "lea 0x8(%esp),%esp");
2771 }
2772
2773 static void
2774 i386_emit_bit_or (void)
2775 {
2776 EMIT_ASM32 (i386_or,
2777 "or (%esp),%eax\n\t"
2778 "or 0x4(%esp),%ebx\n\t"
2779 "lea 0x8(%esp),%esp");
2780 }
2781
2782 static void
2783 i386_emit_bit_xor (void)
2784 {
2785 EMIT_ASM32 (i386_xor,
2786 "xor (%esp),%eax\n\t"
2787 "xor 0x4(%esp),%ebx\n\t"
2788 "lea 0x8(%esp),%esp");
2789 }
2790
2791 static void
2792 i386_emit_bit_not (void)
2793 {
2794 EMIT_ASM32 (i386_bit_not,
2795 "xor $0xffffffff,%eax\n\t"
2796 "xor $0xffffffff,%ebx\n\t");
2797 }
2798
2799 static void
2800 i386_emit_equal (void)
2801 {
2802 EMIT_ASM32 (i386_equal,
2803 "cmpl %ebx,4(%esp)\n\t"
2804 "jne .Li386_equal_false\n\t"
2805 "cmpl %eax,(%esp)\n\t"
2806 "je .Li386_equal_true\n\t"
2807 ".Li386_equal_false:\n\t"
2808 "xor %eax,%eax\n\t"
2809 "jmp .Li386_equal_end\n\t"
2810 ".Li386_equal_true:\n\t"
2811 "mov $1,%eax\n\t"
2812 ".Li386_equal_end:\n\t"
2813 "xor %ebx,%ebx\n\t"
2814 "lea 0x8(%esp),%esp");
2815 }
2816
2817 static void
2818 i386_emit_less_signed (void)
2819 {
2820 EMIT_ASM32 (i386_less_signed,
2821 "cmpl %ebx,4(%esp)\n\t"
2822 "jl .Li386_less_signed_true\n\t"
2823 "jne .Li386_less_signed_false\n\t"
2824 "cmpl %eax,(%esp)\n\t"
2825 "jl .Li386_less_signed_true\n\t"
2826 ".Li386_less_signed_false:\n\t"
2827 "xor %eax,%eax\n\t"
2828 "jmp .Li386_less_signed_end\n\t"
2829 ".Li386_less_signed_true:\n\t"
2830 "mov $1,%eax\n\t"
2831 ".Li386_less_signed_end:\n\t"
2832 "xor %ebx,%ebx\n\t"
2833 "lea 0x8(%esp),%esp");
2834 }
2835
2836 static void
2837 i386_emit_less_unsigned (void)
2838 {
2839 EMIT_ASM32 (i386_less_unsigned,
2840 "cmpl %ebx,4(%esp)\n\t"
2841 "jb .Li386_less_unsigned_true\n\t"
2842 "jne .Li386_less_unsigned_false\n\t"
2843 "cmpl %eax,(%esp)\n\t"
2844 "jb .Li386_less_unsigned_true\n\t"
2845 ".Li386_less_unsigned_false:\n\t"
2846 "xor %eax,%eax\n\t"
2847 "jmp .Li386_less_unsigned_end\n\t"
2848 ".Li386_less_unsigned_true:\n\t"
2849 "mov $1,%eax\n\t"
2850 ".Li386_less_unsigned_end:\n\t"
2851 "xor %ebx,%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2853 }
2854
2855 static void
2856 i386_emit_ref (int size)
2857 {
2858 switch (size)
2859 {
2860 case 1:
2861 EMIT_ASM32 (i386_ref1,
2862 "movb (%eax),%al");
2863 break;
2864 case 2:
2865 EMIT_ASM32 (i386_ref2,
2866 "movw (%eax),%ax");
2867 break;
2868 case 4:
2869 EMIT_ASM32 (i386_ref4,
2870 "movl (%eax),%eax");
2871 break;
2872 case 8:
2873 EMIT_ASM32 (i386_ref8,
2874 "movl 4(%eax),%ebx\n\t"
2875 "movl (%eax),%eax");
2876 break;
2877 }
2878 }
2879
2880 static void
2881 i386_emit_if_goto (int *offset_p, int *size_p)
2882 {
2883 EMIT_ASM32 (i386_if_goto,
2884 "mov %eax,%ecx\n\t"
2885 "or %ebx,%ecx\n\t"
2886 "pop %eax\n\t"
2887 "pop %ebx\n\t"
2888 "cmpl $0,%ecx\n\t"
2889 /* Don't trust the assembler to choose the right jump */
2890 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2891
2892 if (offset_p)
2893 *offset_p = 11; /* be sure that this matches the sequence above */
2894 if (size_p)
2895 *size_p = 4;
2896 }
2897
2898 static void
2899 i386_emit_goto (int *offset_p, int *size_p)
2900 {
2901 EMIT_ASM32 (i386_goto,
2902 /* Don't trust the assembler to choose the right jump */
2903 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2904 if (offset_p)
2905 *offset_p = 1;
2906 if (size_p)
2907 *size_p = 4;
2908 }
2909
2910 static void
2911 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2912 {
2913 int diff = (to - (from + size));
2914 unsigned char buf[sizeof (int)];
2915
2916 /* We're only doing 4-byte sizes at the moment. */
2917 if (size != 4)
2918 {
2919 emit_error = 1;
2920 return;
2921 }
2922
2923 memcpy (buf, &diff, sizeof (int));
2924 write_inferior_memory (from, buf, sizeof (int));
2925 }
2926
2927 static void
2928 i386_emit_const (LONGEST num)
2929 {
2930 unsigned char buf[16];
2931 int i, hi, lo;
2932 CORE_ADDR buildaddr = current_insn_ptr;
2933
2934 i = 0;
2935 buf[i++] = 0xb8; /* mov $<n>,%eax */
2936 lo = num & 0xffffffff;
2937 memcpy (&buf[i], &lo, sizeof (lo));
2938 i += 4;
2939 hi = ((num >> 32) & 0xffffffff);
2940 if (hi)
2941 {
2942 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2943 memcpy (&buf[i], &hi, sizeof (hi));
2944 i += 4;
2945 }
2946 else
2947 {
2948 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2949 }
2950 append_insns (&buildaddr, i, buf);
2951 current_insn_ptr = buildaddr;
2952 }
2953
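/* A worked example of the split above: i386_emit_const (0x100000002LL)
   stores the low word 0x00000002 in %eax via the first mov and the
   high word 0x00000001 in %ebx via the second; a constant that fits in
   32 bits skips the second mov and simply clears %ebx.  */
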
2954 static void
2955 i386_emit_call (CORE_ADDR fn)
2956 {
2957 unsigned char buf[16];
2958 int i, offset;
2959 CORE_ADDR buildaddr;
2960
2961 buildaddr = current_insn_ptr;
2962 i = 0;
2963 buf[i++] = 0xe8; /* call <reladdr> */
2964 offset = ((int) fn) - (buildaddr + 5);
2965 memcpy (buf + 1, &offset, 4);
2966 append_insns (&buildaddr, 5, buf);
2967 current_insn_ptr = buildaddr;
2968 }
2969
2970 static void
2971 i386_emit_reg (int reg)
2972 {
2973 unsigned char buf[16];
2974 int i;
2975 CORE_ADDR buildaddr;
2976
2977 EMIT_ASM32 (i386_reg_a,
2978 "sub $0x8,%esp");
2979 buildaddr = current_insn_ptr;
2980 i = 0;
2981 buf[i++] = 0xb8; /* mov $<n>,%eax */
2982 memcpy (&buf[i], &reg, sizeof (reg));
2983 i += 4;
2984 append_insns (&buildaddr, i, buf);
2985 current_insn_ptr = buildaddr;
2986 EMIT_ASM32 (i386_reg_b,
2987 "mov %eax,4(%esp)\n\t"
2988 "mov 8(%ebp),%eax\n\t"
2989 "mov %eax,(%esp)");
2990 i386_emit_call (get_raw_reg_func_addr ());
2991 EMIT_ASM32 (i386_reg_c,
2992 "xor %ebx,%ebx\n\t"
2993 "lea 0x8(%esp),%esp");
2994 }
2995
2996 static void
2997 i386_emit_pop (void)
2998 {
2999 EMIT_ASM32 (i386_pop,
3000 "pop %eax\n\t"
3001 "pop %ebx");
3002 }
3003
3004 static void
3005 i386_emit_stack_flush (void)
3006 {
3007 EMIT_ASM32 (i386_stack_flush,
3008 "push %ebx\n\t"
3009 "push %eax");
3010 }
3011
3012 static void
3013 i386_emit_zero_ext (int arg)
3014 {
3015 switch (arg)
3016 {
3017 case 8:
3018 EMIT_ASM32 (i386_zero_ext_8,
3019 "and $0xff,%eax\n\t"
3020 "xor %ebx,%ebx");
3021 break;
3022 case 16:
3023 EMIT_ASM32 (i386_zero_ext_16,
3024 "and $0xffff,%eax\n\t"
3025 "xor %ebx,%ebx");
3026 break;
3027 case 32:
3028 EMIT_ASM32 (i386_zero_ext_32,
3029 "xor %ebx,%ebx");
3030 break;
3031 default:
3032 emit_error = 1;
3033 }
3034 }
3035
3036 static void
3037 i386_emit_swap (void)
3038 {
3039 EMIT_ASM32 (i386_swap,
3040 "mov %eax,%ecx\n\t"
3041 "mov %ebx,%edx\n\t"
3042 "pop %eax\n\t"
3043 "pop %ebx\n\t"
3044 "push %edx\n\t"
3045 "push %ecx");
3046 }
3047
3048 static void
3049 i386_emit_stack_adjust (int n)
3050 {
3051 unsigned char buf[16];
3052 int i;
3053 CORE_ADDR buildaddr = current_insn_ptr;
3054
3055 i = 0;
3056 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3057 buf[i++] = 0x64;
3058 buf[i++] = 0x24;
3059 	  buf[i++] = n * 8; /* 8-bit displacement; works for n * 8 <= 127.  */
3060 append_insns (&buildaddr, i, buf);
3061 current_insn_ptr = buildaddr;
3062 }
3063
3064 /* FN's prototype is `LONGEST(*fn)(int)'. */
3065
3066 static void
3067 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3068 {
3069 unsigned char buf[16];
3070 int i;
3071 CORE_ADDR buildaddr;
3072
3073 EMIT_ASM32 (i386_int_call_1_a,
3074 /* Reserve a bit of stack space. */
3075 "sub $0x8,%esp");
3076 /* Put the one argument on the stack. */
3077 buildaddr = current_insn_ptr;
3078 i = 0;
3079 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3080 buf[i++] = 0x04;
3081 buf[i++] = 0x24;
3082 memcpy (&buf[i], &arg1, sizeof (arg1));
3083 i += 4;
3084 append_insns (&buildaddr, i, buf);
3085 current_insn_ptr = buildaddr;
3086 i386_emit_call (fn);
3087 EMIT_ASM32 (i386_int_call_1_c,
3088 "mov %edx,%ebx\n\t"
3089 "lea 0x8(%esp),%esp");
3090 }
3091
3092 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3093
3094 static void
3095 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3096 {
3097 unsigned char buf[16];
3098 int i;
3099 CORE_ADDR buildaddr;
3100
3101 EMIT_ASM32 (i386_void_call_2_a,
3102 /* Preserve %eax only; we don't have to worry about %ebx. */
3103 "push %eax\n\t"
3104 /* Reserve a bit of stack space for arguments. */
3105 "sub $0x10,%esp\n\t"
3106 /* Copy "top" to the second argument position. (Note that
3107 	         we can't assume the function won't scribble on its
3108 arguments, so don't try to restore from this.) */
3109 "mov %eax,4(%esp)\n\t"
3110 "mov %ebx,8(%esp)");
3111 /* Put the first argument on the stack. */
3112 buildaddr = current_insn_ptr;
3113 i = 0;
3114 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3115 buf[i++] = 0x04;
3116 buf[i++] = 0x24;
3117 memcpy (&buf[i], &arg1, sizeof (arg1));
3118 i += 4;
3119 append_insns (&buildaddr, i, buf);
3120 current_insn_ptr = buildaddr;
3121 i386_emit_call (fn);
3122 EMIT_ASM32 (i386_void_call_2_b,
3123 "lea 0x10(%esp),%esp\n\t"
3124 /* Restore original stack top. */
3125 "pop %eax");
3126 }
3127
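/* Stack layout at the moment of the i386_emit_call above (cdecl, both
   arguments passed in the 0x10-byte scratch area):

       (%esp)   arg1         first argument
      4(%esp)   %eax         second argument, low half
      8(%esp)   %ebx         second argument, high half
     12(%esp)   unused
     16(%esp)   saved %eax   restored by the final pop  */
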
3128
3129 void
3130 i386_emit_eq_goto (int *offset_p, int *size_p)
3131 {
3132 EMIT_ASM32 (eq,
3133 /* Check low half first, more likely to be decider */
3134 "cmpl %eax,(%esp)\n\t"
3135 "jne .Leq_fallthru\n\t"
3136 "cmpl %ebx,4(%esp)\n\t"
3137 "jne .Leq_fallthru\n\t"
3138 "lea 0x8(%esp),%esp\n\t"
3139 "pop %eax\n\t"
3140 "pop %ebx\n\t"
3141 /* jmp, but don't trust the assembler to choose the right jump */
3142 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3143 ".Leq_fallthru:\n\t"
3144 "lea 0x8(%esp),%esp\n\t"
3145 "pop %eax\n\t"
3146 "pop %ebx");
3147
3148 if (offset_p)
3149 *offset_p = 18;
3150 if (size_p)
3151 *size_p = 4;
3152 }
3153
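/* The 18 just stored is the offset of the jump displacement within the
   sequence: cmpl %eax,(%esp) (3 bytes), jne rel8 (2),
   cmpl %ebx,4(%esp) (4), jne rel8 (2), lea (4) and the two pops
   (1 + 1) add up to 17 bytes, so the 0xe9 opcode sits at offset 17 and
   its 4-byte operand at offset 18.  */
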
3154 void
3155 i386_emit_ne_goto (int *offset_p, int *size_p)
3156 {
3157 EMIT_ASM32 (ne,
3158 /* Check low half first, more likely to be decider */
3159 "cmpl %eax,(%esp)\n\t"
3160 "jne .Lne_jump\n\t"
3161 "cmpl %ebx,4(%esp)\n\t"
3162 "je .Lne_fallthru\n\t"
3163 ".Lne_jump:\n\t"
3164 "lea 0x8(%esp),%esp\n\t"
3165 "pop %eax\n\t"
3166 "pop %ebx\n\t"
3167 /* jmp, but don't trust the assembler to choose the right jump */
3168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3169 ".Lne_fallthru:\n\t"
3170 "lea 0x8(%esp),%esp\n\t"
3171 "pop %eax\n\t"
3172 "pop %ebx");
3173
3174 if (offset_p)
3175 *offset_p = 18;
3176 if (size_p)
3177 *size_p = 4;
3178 }
3179
3180 void
3181 i386_emit_lt_goto (int *offset_p, int *size_p)
3182 {
3183 EMIT_ASM32 (lt,
3184 "cmpl %ebx,4(%esp)\n\t"
3185 "jl .Llt_jump\n\t"
3186 "jne .Llt_fallthru\n\t"
3187 "cmpl %eax,(%esp)\n\t"
3188 "jnl .Llt_fallthru\n\t"
3189 ".Llt_jump:\n\t"
3190 "lea 0x8(%esp),%esp\n\t"
3191 "pop %eax\n\t"
3192 "pop %ebx\n\t"
3193 /* jmp, but don't trust the assembler to choose the right jump */
3194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3195 ".Llt_fallthru:\n\t"
3196 "lea 0x8(%esp),%esp\n\t"
3197 "pop %eax\n\t"
3198 "pop %ebx");
3199
3200 if (offset_p)
3201 *offset_p = 20;
3202 if (size_p)
3203 *size_p = 4;
3204 }
3205
3206 void
3207 i386_emit_le_goto (int *offset_p, int *size_p)
3208 {
3209 EMIT_ASM32 (le,
3210 "cmpl %ebx,4(%esp)\n\t"
3211 "jle .Lle_jump\n\t"
3212 "jne .Lle_fallthru\n\t"
3213 "cmpl %eax,(%esp)\n\t"
3214 "jnle .Lle_fallthru\n\t"
3215 ".Lle_jump:\n\t"
3216 "lea 0x8(%esp),%esp\n\t"
3217 "pop %eax\n\t"
3218 "pop %ebx\n\t"
3219 /* jmp, but don't trust the assembler to choose the right jump */
3220 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3221 ".Lle_fallthru:\n\t"
3222 "lea 0x8(%esp),%esp\n\t"
3223 "pop %eax\n\t"
3224 "pop %ebx");
3225
3226 if (offset_p)
3227 *offset_p = 20;
3228 if (size_p)
3229 *size_p = 4;
3230 }
3231
3232 void
3233 i386_emit_gt_goto (int *offset_p, int *size_p)
3234 {
3235 EMIT_ASM32 (gt,
3236 "cmpl %ebx,4(%esp)\n\t"
3237 "jg .Lgt_jump\n\t"
3238 "jne .Lgt_fallthru\n\t"
3239 "cmpl %eax,(%esp)\n\t"
3240 "jng .Lgt_fallthru\n\t"
3241 ".Lgt_jump:\n\t"
3242 "lea 0x8(%esp),%esp\n\t"
3243 "pop %eax\n\t"
3244 "pop %ebx\n\t"
3245 /* jmp, but don't trust the assembler to choose the right jump */
3246 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3247 ".Lgt_fallthru:\n\t"
3248 "lea 0x8(%esp),%esp\n\t"
3249 "pop %eax\n\t"
3250 "pop %ebx");
3251
3252 if (offset_p)
3253 *offset_p = 20;
3254 if (size_p)
3255 *size_p = 4;
3256 }
3257
3258 void
3259 i386_emit_ge_goto (int *offset_p, int *size_p)
3260 {
3261 EMIT_ASM32 (ge,
3262 "cmpl %ebx,4(%esp)\n\t"
3263 "jge .Lge_jump\n\t"
3264 "jne .Lge_fallthru\n\t"
3265 "cmpl %eax,(%esp)\n\t"
3266 "jnge .Lge_fallthru\n\t"
3267 ".Lge_jump:\n\t"
3268 "lea 0x8(%esp),%esp\n\t"
3269 "pop %eax\n\t"
3270 "pop %ebx\n\t"
3271 /* jmp, but don't trust the assembler to choose the right jump */
3272 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3273 ".Lge_fallthru:\n\t"
3274 "lea 0x8(%esp),%esp\n\t"
3275 "pop %eax\n\t"
3276 "pop %ebx");
3277
3278 if (offset_p)
3279 *offset_p = 20;
3280 if (size_p)
3281 *size_p = 4;
3282 }
3283
3284 struct emit_ops i386_emit_ops =
3285 {
3286 i386_emit_prologue,
3287 i386_emit_epilogue,
3288 i386_emit_add,
3289 i386_emit_sub,
3290 i386_emit_mul,
3291 i386_emit_lsh,
3292 i386_emit_rsh_signed,
3293 i386_emit_rsh_unsigned,
3294 i386_emit_ext,
3295 i386_emit_log_not,
3296 i386_emit_bit_and,
3297 i386_emit_bit_or,
3298 i386_emit_bit_xor,
3299 i386_emit_bit_not,
3300 i386_emit_equal,
3301 i386_emit_less_signed,
3302 i386_emit_less_unsigned,
3303 i386_emit_ref,
3304 i386_emit_if_goto,
3305 i386_emit_goto,
3306 i386_write_goto_address,
3307 i386_emit_const,
3308 i386_emit_call,
3309 i386_emit_reg,
3310 i386_emit_pop,
3311 i386_emit_stack_flush,
3312 i386_emit_zero_ext,
3313 i386_emit_swap,
3314 i386_emit_stack_adjust,
3315 i386_emit_int_call_1,
3316 i386_emit_void_call_2,
3317 i386_emit_eq_goto,
3318 i386_emit_ne_goto,
3319 i386_emit_lt_goto,
3320 i386_emit_le_goto,
3321 i386_emit_gt_goto,
3322 i386_emit_ge_goto
3323 };
3324
3325
3326 static struct emit_ops *
3327 x86_emit_ops (void)
3328 {
3329 #ifdef __x86_64__
3330 if (is_64bit_tdesc ())
3331 return &amd64_emit_ops;
3332 else
3333 #endif
3334 return &i386_emit_ops;
3335 }
3336
3337 static int
3338 x86_supports_range_stepping (void)
3339 {
3340 return 1;
3341 }
3342
3343 /* This is initialized assuming an amd64 target.
3344 	   x86_arch_setup will correct it to match the actual target,
	   i386 or amd64.  */
3345
3346 struct linux_target_ops the_low_target =
3347 {
3348 x86_arch_setup,
3349 x86_linux_regs_info,
3350 x86_cannot_fetch_register,
3351 x86_cannot_store_register,
3352 NULL, /* fetch_register */
3353 x86_get_pc,
3354 x86_set_pc,
3355 x86_breakpoint,
3356 x86_breakpoint_len,
3357 	  NULL, /* breakpoint_reinsert_addr */
3358 	  1, /* decr_pc_after_break */
3359 x86_breakpoint_at,
3360 x86_insert_point,
3361 x86_remove_point,
3362 x86_stopped_by_watchpoint,
3363 x86_stopped_data_address,
3364 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3365 native i386 case (no registers smaller than an xfer unit), and are not
3366 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3367 NULL,
3368 NULL,
3369 /* need to fix up i386 siginfo if host is amd64 */
3370 x86_siginfo_fixup,
3371 x86_linux_new_process,
3372 x86_linux_new_thread,
3373 x86_linux_prepare_to_resume,
3374 x86_linux_process_qsupported,
3375 x86_supports_tracepoints,
3376 x86_get_thread_area,
3377 x86_install_fast_tracepoint_jump_pad,
3378 x86_emit_ops,
3379 x86_get_min_fast_tracepoint_insn_len,
3380 x86_supports_range_stepping,
3381 };
3382
3383 void
3384 initialize_low_arch (void)
3385 {
3386 /* Initialize the Linux target descriptions. */
3387 #ifdef __x86_64__
3388 init_registers_amd64_linux ();
3389 init_registers_amd64_avx_linux ();
3390 init_registers_amd64_mpx_linux ();
3391
3392 init_registers_x32_linux ();
3393 init_registers_x32_avx_linux ();
3394
3395 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3396 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3397 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3398 #endif
3399 init_registers_i386_linux ();
3400 init_registers_i386_mmx_linux ();
3401 init_registers_i386_avx_linux ();
3402 init_registers_i386_mpx_linux ();
3403
3404 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3405 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3406 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3407
3408 initialize_regsets_info (&x86_regsets_info);
3409 }