binutils-gdb: gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
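/* jump_insn is an x86 "jmp rel32" (opcode 0xe9); small_jump_insn adds the
   0x66 operand-size prefix to get a "jmp rel16".  The zeroed displacement
   bytes are patched in when the jump pads and trampolines are wired up
   below.  */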
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
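/* These are the arch_prctl sub-commands.  Below they are passed as the
   ptrace "data" argument of PTRACE_ARCH_PRCTL to read or write the
   inferior's fs/gs base when user_regs_struct has no fs_base/gs_base
   members.  */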
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support XML target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 bool supports_z_point_type (char z_type) override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130 };
131
132 /* The singleton target ops object. */
133
134 static x86_target the_x86_target;
135
136 /* Per-process arch-specific data we want to keep. */
137
138 struct arch_process_info
139 {
140 struct x86_debug_reg_state debug_reg_state;
141 };
142
143 #ifdef __x86_64__
144
145 /* Mapping between the general-purpose registers in `struct user'
146 format and GDB's register array layout.
147 Note that the transfer layout uses 64-bit regs. */
148 static /*const*/ int i386_regmap[] =
149 {
150 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
151 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
152 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
153 DS * 8, ES * 8, FS * 8, GS * 8
154 };
155
156 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
157
158 /* So that the code below doesn't have to care whether this is i386 or amd64. */
159 #define ORIG_EAX ORIG_RAX
160 #define REGSIZE 8
161
162 static const int x86_64_regmap[] =
163 {
164 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
165 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
166 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
167 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
168 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
169 DS * 8, ES * 8, FS * 8, GS * 8,
170 -1, -1, -1, -1, -1, -1, -1, -1,
171 -1, -1, -1, -1, -1, -1, -1, -1,
172 -1, -1, -1, -1, -1, -1, -1, -1,
173 -1,
174 -1, -1, -1, -1, -1, -1, -1, -1,
175 ORIG_RAX * 8,
176 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
177 21 * 8, 22 * 8,
178 #else
179 -1, -1,
180 #endif
181 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
182 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
183 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
186 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
188 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1 /* pkru */
193 };
194
195 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
196 #define X86_64_USER_REGS (GS + 1)
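/* X86_64_USER_REGS is the number of 64-bit slots in the kernel's
   user_regs_struct (GS is its last index in <sys/reg.h> order); it is
   used below to zero-fill that part of the buffer before collecting the
   registers of a 32-bit inferior.  */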
197
198 #else /* ! __x86_64__ */
199
200 /* Mapping between the general-purpose registers in `struct user'
201 format and GDB's register array layout. */
202 static /*const*/ int i386_regmap[] =
203 {
204 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
205 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
206 EIP * 4, EFL * 4, CS * 4, SS * 4,
207 DS * 4, ES * 4, FS * 4, GS * 4
208 };
209
210 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
211
212 #define REGSIZE 4
213
214 #endif
215
216 #ifdef __x86_64__
217
218 /* Returns true if the current inferior belongs to an x86-64 process,
219 per the tdesc. */
220
221 static int
222 is_64bit_tdesc (void)
223 {
224 struct regcache *regcache = get_thread_regcache (current_thread, 0);
225
226 return register_size (regcache->tdesc, 0) == 8;
227 }
228
229 #endif
230
231 \f
232 /* Called by libthread_db. */
233
234 ps_err_e
235 ps_get_thread_area (struct ps_prochandle *ph,
236 lwpid_t lwpid, int idx, void **base)
237 {
238 #ifdef __x86_64__
239 int use_64bit = is_64bit_tdesc ();
240
241 if (use_64bit)
242 {
243 switch (idx)
244 {
245 case FS:
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
247 return PS_OK;
248 break;
249 case GS:
250 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
251 return PS_OK;
252 break;
253 default:
254 return PS_BADADDR;
255 }
256 return PS_ERR;
257 }
258 #endif
259
260 {
261 unsigned int desc[4];
262
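/* DESC receives the four 32-bit words of the GDT entry that
   PTRACE_GET_THREAD_AREA returns (a struct user_desc); DESC[1] is the
   segment base address, which is all we need here.  */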
263 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
264 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
265 return PS_ERR;
266
267 /* Ensure we properly extend the value to 64-bits for x86_64. */
268 *base = (void *) (uintptr_t) desc[1];
269 return PS_OK;
270 }
271 }
272
273 /* Get the thread area address. This is used to recognize which
274 thread is which when tracing with the in-process agent library. We
275 don't read anything from the address, and treat it as opaque; it's
276 the address itself that we assume is unique per-thread. */
277
278 static int
279 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
280 {
281 #ifdef __x86_64__
282 int use_64bit = is_64bit_tdesc ();
283
284 if (use_64bit)
285 {
286 void *base;
287 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
288 {
289 *addr = (CORE_ADDR) (uintptr_t) base;
290 return 0;
291 }
292
293 return -1;
294 }
295 #endif
296
297 {
298 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
299 struct thread_info *thr = get_lwp_thread (lwp);
300 struct regcache *regcache = get_thread_regcache (thr, 1);
301 unsigned int desc[4];
302 ULONGEST gs = 0;
303 const int reg_thread_area = 3; /* Shift that turns the GS selector into a GDT index (drops the RPL/TI bits). */
304 int idx;
305
306 collect_register_by_name (regcache, "gs", &gs);
307
308 idx = gs >> reg_thread_area;
309
310 if (ptrace (PTRACE_GET_THREAD_AREA,
311 lwpid_of (thr),
312 (void *) (long) idx, (unsigned long) &desc) < 0)
313 return -1;
314
315 *addr = desc[1];
316 return 0;
317 }
318 }
319
320
321 \f
322 bool
323 x86_target::low_cannot_store_register (int regno)
324 {
325 #ifdef __x86_64__
326 if (is_64bit_tdesc ())
327 return false;
328 #endif
329
330 return regno >= I386_NUM_REGS;
331 }
332
333 bool
334 x86_target::low_cannot_fetch_register (int regno)
335 {
336 #ifdef __x86_64__
337 if (is_64bit_tdesc ())
338 return false;
339 #endif
340
341 return regno >= I386_NUM_REGS;
342 }
343
344 static void
345 x86_fill_gregset (struct regcache *regcache, void *buf)
346 {
347 int i;
348
349 #ifdef __x86_64__
350 if (register_size (regcache->tdesc, 0) == 8)
351 {
352 for (i = 0; i < X86_64_NUM_REGS; i++)
353 if (x86_64_regmap[i] != -1)
354 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
355
356 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
357 {
358 unsigned long base;
359 int lwpid = lwpid_of (current_thread);
360
361 collect_register_by_name (regcache, "fs_base", &base);
362 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
363
364 collect_register_by_name (regcache, "gs_base", &base);
365 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
366 }
367 #endif
368
369 return;
370 }
371
372 /* 32-bit inferior registers need to be zero-extended.
373 Callers would read uninitialized memory otherwise. */
374 memset (buf, 0x00, X86_64_USER_REGS * 8);
375 #endif
376
377 for (i = 0; i < I386_NUM_REGS; i++)
378 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
379
380 collect_register_by_name (regcache, "orig_eax",
381 ((char *) buf) + ORIG_EAX * REGSIZE);
382
383 #ifdef __x86_64__
384 /* Sign extend EAX value to avoid potential syscall restart
385 problems.
386
387 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
388 for a detailed explanation. */
389 if (register_size (regcache->tdesc, 0) == 4)
390 {
391 void *ptr = ((gdb_byte *) buf
392 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
393
394 *(int64_t *) ptr = *(int32_t *) ptr;
395 }
396 #endif
397 }
398
399 static void
400 x86_store_gregset (struct regcache *regcache, const void *buf)
401 {
402 int i;
403
404 #ifdef __x86_64__
405 if (register_size (regcache->tdesc, 0) == 8)
406 {
407 for (i = 0; i < X86_64_NUM_REGS; i++)
408 if (x86_64_regmap[i] != -1)
409 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
410
411 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
412 {
413 unsigned long base;
414 int lwpid = lwpid_of (current_thread);
415
416 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
417 supply_register_by_name (regcache, "fs_base", &base);
418
419 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
420 supply_register_by_name (regcache, "gs_base", &base);
421 }
422 #endif
423 return;
424 }
425 #endif
426
427 for (i = 0; i < I386_NUM_REGS; i++)
428 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
429
430 supply_register_by_name (regcache, "orig_eax",
431 ((char *) buf) + ORIG_EAX * REGSIZE);
432 }
433
434 static void
435 x86_fill_fpregset (struct regcache *regcache, void *buf)
436 {
437 #ifdef __x86_64__
438 i387_cache_to_fxsave (regcache, buf);
439 #else
440 i387_cache_to_fsave (regcache, buf);
441 #endif
442 }
443
444 static void
445 x86_store_fpregset (struct regcache *regcache, const void *buf)
446 {
447 #ifdef __x86_64__
448 i387_fxsave_to_cache (regcache, buf);
449 #else
450 i387_fsave_to_cache (regcache, buf);
451 #endif
452 }
453
454 #ifndef __x86_64__
455
456 static void
457 x86_fill_fpxregset (struct regcache *regcache, void *buf)
458 {
459 i387_cache_to_fxsave (regcache, buf);
460 }
461
462 static void
463 x86_store_fpxregset (struct regcache *regcache, const void *buf)
464 {
465 i387_fxsave_to_cache (regcache, buf);
466 }
467
468 #endif
469
470 static void
471 x86_fill_xstateregset (struct regcache *regcache, void *buf)
472 {
473 i387_cache_to_xsave (regcache, buf);
474 }
475
476 static void
477 x86_store_xstateregset (struct regcache *regcache, const void *buf)
478 {
479 i387_xsave_to_cache (regcache, buf);
480 }
481
482 /* ??? The non-biarch i386 case stores all the i387 regs twice.
483 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
484 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
485 doesn't work. It would be nice to avoid the duplication in the case where it
486 does work. Maybe the arch_setup routine could check whether it works
487 and update the supported regsets accordingly. */
488
489 static struct regset_info x86_regsets[] =
490 {
491 #ifdef HAVE_PTRACE_GETREGS
492 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
493 GENERAL_REGS,
494 x86_fill_gregset, x86_store_gregset },
495 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
496 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
497 # ifndef __x86_64__
498 # ifdef HAVE_PTRACE_GETFPXREGS
499 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
500 EXTENDED_REGS,
501 x86_fill_fpxregset, x86_store_fpxregset },
502 # endif
503 # endif
504 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
505 FP_REGS,
506 x86_fill_fpregset, x86_store_fpregset },
507 #endif /* HAVE_PTRACE_GETREGS */
508 NULL_REGSET
509 };
510
511 bool
512 x86_target::low_supports_breakpoints ()
513 {
514 return true;
515 }
516
517 CORE_ADDR
518 x86_target::low_get_pc (regcache *regcache)
519 {
520 int use_64bit = register_size (regcache->tdesc, 0) == 8;
521
522 if (use_64bit)
523 {
524 uint64_t pc;
525
526 collect_register_by_name (regcache, "rip", &pc);
527 return (CORE_ADDR) pc;
528 }
529 else
530 {
531 uint32_t pc;
532
533 collect_register_by_name (regcache, "eip", &pc);
534 return (CORE_ADDR) pc;
535 }
536 }
537
538 void
539 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
540 {
541 int use_64bit = register_size (regcache->tdesc, 0) == 8;
542
543 if (use_64bit)
544 {
545 uint64_t newpc = pc;
546
547 supply_register_by_name (regcache, "rip", &newpc);
548 }
549 else
550 {
551 uint32_t newpc = pc;
552
553 supply_register_by_name (regcache, "eip", &newpc);
554 }
555 }
556
557 int
558 x86_target::low_decr_pc_after_break ()
559 {
560 return 1;
561 }
562
563 \f
564 static const gdb_byte x86_breakpoint[] = { 0xCC };
565 #define x86_breakpoint_len 1
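/* 0xCC is the one-byte "int3" software breakpoint instruction on both
   i386 and x86-64.  */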
566
567 bool
568 x86_target::low_breakpoint_at (CORE_ADDR pc)
569 {
570 unsigned char c;
571
572 read_memory (pc, &c, 1);
573 if (c == 0xCC)
574 return true;
575
576 return false;
577 }
578 \f
579 /* Low-level function vector. */
580 struct x86_dr_low_type x86_dr_low =
581 {
582 x86_linux_dr_set_control,
583 x86_linux_dr_set_addr,
584 x86_linux_dr_get_addr,
585 x86_linux_dr_get_status,
586 x86_linux_dr_get_control,
587 sizeof (void *),
588 };
589 \f
590 /* Breakpoint/Watchpoint support. */
591
592 bool
593 x86_target::supports_z_point_type (char z_type)
594 {
595 switch (z_type)
596 {
597 case Z_PACKET_SW_BP:
598 case Z_PACKET_HW_BP:
599 case Z_PACKET_WRITE_WP:
600 case Z_PACKET_ACCESS_WP:
601 return true;
602 default:
603 return false;
604 }
605 }
606
607 static int
608 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
609 int size, struct raw_breakpoint *bp)
610 {
611 struct process_info *proc = current_process ();
612
613 switch (type)
614 {
615 case raw_bkpt_type_hw:
616 case raw_bkpt_type_write_wp:
617 case raw_bkpt_type_access_wp:
618 {
619 enum target_hw_bp_type hw_type
620 = raw_bkpt_type_to_target_hw_bp_type (type);
621 struct x86_debug_reg_state *state
622 = &proc->priv->arch_private->debug_reg_state;
623
624 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
625 }
626
627 default:
628 /* Unsupported. */
629 return 1;
630 }
631 }
632
633 static int
634 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
635 int size, struct raw_breakpoint *bp)
636 {
637 struct process_info *proc = current_process ();
638
639 switch (type)
640 {
641 case raw_bkpt_type_hw:
642 case raw_bkpt_type_write_wp:
643 case raw_bkpt_type_access_wp:
644 {
645 enum target_hw_bp_type hw_type
646 = raw_bkpt_type_to_target_hw_bp_type (type);
647 struct x86_debug_reg_state *state
648 = &proc->priv->arch_private->debug_reg_state;
649
650 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
651 }
652 default:
653 /* Unsupported. */
654 return 1;
655 }
656 }
657
658 static int
659 x86_stopped_by_watchpoint (void)
660 {
661 struct process_info *proc = current_process ();
662 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
663 }
664
665 static CORE_ADDR
666 x86_stopped_data_address (void)
667 {
668 struct process_info *proc = current_process ();
669 CORE_ADDR addr;
670 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
671 &addr))
672 return addr;
673 return 0;
674 }
675 \f
676 /* Called when a new process is created. */
677
678 static struct arch_process_info *
679 x86_linux_new_process (void)
680 {
681 struct arch_process_info *info = XCNEW (struct arch_process_info);
682
683 x86_low_init_dregs (&info->debug_reg_state);
684
685 return info;
686 }
687
688 /* Called when a process is being deleted. */
689
690 static void
691 x86_linux_delete_process (struct arch_process_info *info)
692 {
693 xfree (info);
694 }
695
696 /* Target routine for linux_new_fork. */
697
698 static void
699 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
700 {
701 /* These are allocated by linux_add_process. */
702 gdb_assert (parent->priv != NULL
703 && parent->priv->arch_private != NULL);
704 gdb_assert (child->priv != NULL
705 && child->priv->arch_private != NULL);
706
707 /* Linux kernels before 2.6.33 commit
708 72f674d203cd230426437cdcf7dd6f681dad8b0d
709 let the child inherit the parent's hardware debug registers on
710 fork/vfork/clone. Newer Linux kernels create such tasks with
711 zeroed debug registers.
712
713 GDB core assumes the child inherits the watchpoints/hw
714 breakpoints of the parent, and will remove them all from the
715 forked-off process. Copy the debug register mirrors into the
716 new process so that all breakpoints and watchpoints can be
717 removed together. The debug register mirrors will be zeroed
718 in the end, before detaching the forked-off process, thus making
719 this compatible with older Linux kernels too. */
720
721 *child->priv->arch_private = *parent->priv->arch_private;
722 }
723
724 /* See nat/x86-dregs.h. */
725
726 struct x86_debug_reg_state *
727 x86_debug_reg_state (pid_t pid)
728 {
729 struct process_info *proc = find_process_pid (pid);
730
731 return &proc->priv->arch_private->debug_reg_state;
732 }
733 \f
734 /* When GDBSERVER is built as a 64-bit application on Linux, the
735 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
736 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
737 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
738 conversion in-place ourselves. */
739
740 /* Convert a ptrace/host siginfo object into/from the siginfo in the
741 layout of the inferior's architecture. Returns true if any
742 conversion was done; false otherwise. If DIRECTION is 1, then copy
743 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
744 INF. */
745
746 static int
747 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
748 {
749 #ifdef __x86_64__
750 unsigned int machine;
751 int tid = lwpid_of (current_thread);
752 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
753
754 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
755 if (!is_64bit_tdesc ())
756 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
757 FIXUP_32);
758 /* No fixup for native x32 GDB. */
759 else if (!is_elf64 && sizeof (void *) == 8)
760 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
761 FIXUP_X32);
762 #endif
763
764 return 0;
765 }
766 \f
767 static int use_xml;
768
769 /* Format of XSAVE extended state is:
770 struct
771 {
772 fxsave_bytes[0..463]
773 sw_usable_bytes[464..511]
774 xstate_hdr_bytes[512..575]
775 avx_bytes[576..831]
776 future_state etc
777 };
778
779 The same memory layout is used for the coredump NT_X86_XSTATE note
780 representing the XSAVE extended state registers.
781
782 The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the OS
783 enabled extended state mask, which is the same as the extended control
784 register 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this
785 mask together with the mask saved in the xstate_hdr_bytes to determine
786 what states the processor/OS supports and what state, used or
787 initialized, the process/thread is in. */
788 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
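/* As a minimal sketch (assuming XSAVE_BUF is a hypothetical buffer filled
   in by PTRACE_GETREGSET with NT_X86_XSTATE, as done in
   x86_linux_read_description below), XCR0 can be pulled out of the
   sw_usable_bytes area like this:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);
*/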
789
790 /* Does the current host support the GETFPXREGS request? The header
791 file may or may not define it, and even if it is defined, the
792 kernel will return EIO if it's running on a pre-SSE processor. */
793 int have_ptrace_getfpxregs =
794 #ifdef HAVE_PTRACE_GETFPXREGS
795 -1
796 #else
797 0
798 #endif
799 ;
800
801 /* Get Linux/x86 target description from running target. */
802
803 static const struct target_desc *
804 x86_linux_read_description (void)
805 {
806 unsigned int machine;
807 int is_elf64;
808 int xcr0_features;
809 int tid;
810 static uint64_t xcr0;
811 struct regset_info *regset;
812
813 tid = lwpid_of (current_thread);
814
815 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
816
817 if (sizeof (void *) == 4)
818 {
819 if (is_elf64 > 0)
820 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
821 #ifndef __x86_64__
822 else if (machine == EM_X86_64)
823 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
824 #endif
825 }
826
827 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
828 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
829 {
830 elf_fpxregset_t fpxregs;
831
832 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
833 {
834 have_ptrace_getfpxregs = 0;
835 have_ptrace_getregset = 0;
836 return i386_linux_read_description (X86_XSTATE_X87);
837 }
838 else
839 have_ptrace_getfpxregs = 1;
840 }
841 #endif
842
843 if (!use_xml)
844 {
845 x86_xcr0 = X86_XSTATE_SSE_MASK;
846
847 /* Don't use XML. */
848 #ifdef __x86_64__
849 if (machine == EM_X86_64)
850 return tdesc_amd64_linux_no_xml;
851 else
852 #endif
853 return tdesc_i386_linux_no_xml;
854 }
855
856 if (have_ptrace_getregset == -1)
857 {
858 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
859 struct iovec iov;
860
861 iov.iov_base = xstateregs;
862 iov.iov_len = sizeof (xstateregs);
863
864 /* Check if PTRACE_GETREGSET works. */
865 if (ptrace (PTRACE_GETREGSET, tid,
866 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
867 have_ptrace_getregset = 0;
868 else
869 {
870 have_ptrace_getregset = 1;
871
872 /* Get XCR0 from XSAVE extended state. */
873 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
874 / sizeof (uint64_t))];
875
876 /* Use PTRACE_GETREGSET if it is available. */
877 for (regset = x86_regsets;
878 regset->fill_function != NULL; regset++)
879 if (regset->get_request == PTRACE_GETREGSET)
880 regset->size = X86_XSTATE_SIZE (xcr0);
881 else if (regset->type != GENERAL_REGS)
882 regset->size = 0;
883 }
884 }
885
886 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
887 xcr0_features = (have_ptrace_getregset
888 && (xcr0 & X86_XSTATE_ALL_MASK));
889
890 if (xcr0_features)
891 x86_xcr0 = xcr0;
892
893 if (machine == EM_X86_64)
894 {
895 #ifdef __x86_64__
896 const target_desc *tdesc = NULL;
897
898 if (xcr0_features)
899 {
900 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
901 !is_elf64);
902 }
903
904 if (tdesc == NULL)
905 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
906 return tdesc;
907 #endif
908 }
909 else
910 {
911 const target_desc *tdesc = NULL;
912
913 if (xcr0_features)
914 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
915
916 if (tdesc == NULL)
917 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
918
919 return tdesc;
920 }
921
922 gdb_assert_not_reached ("failed to return tdesc");
923 }
924
925 /* Update the target description of all processes; a new GDB has
926 connected, and it may or may not support XML target descriptions. */
927
928 void
929 x86_target::update_xmltarget ()
930 {
931 struct thread_info *saved_thread = current_thread;
932
933 /* Before changing the register cache's internal layout, flush the
934 contents of the current valid caches back to the threads, and
935 release the current regcache objects. */
936 regcache_release ();
937
938 for_each_process ([this] (process_info *proc) {
939 int pid = proc->pid;
940
941 /* Look up any thread of this process. */
942 current_thread = find_any_thread_of_pid (pid);
943
944 low_arch_setup ();
945 });
946
947 current_thread = saved_thread;
948 }
949
950 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
951 PTRACE_GETREGSET. */
952
953 static void
954 x86_linux_process_qsupported (char **features, int count)
955 {
956 int i;
957
958 /* Assume GDB doesn't support XML unless told otherwise. If GDB sends
959 "xmlRegisters=" with "i386" in the qSupported query, it supports x86
960 XML target descriptions. */
961 use_xml = 0;
962 for (i = 0; i < count; i++)
963 {
964 const char *feature = features[i];
965
966 if (startswith (feature, "xmlRegisters="))
967 {
968 char *copy = xstrdup (feature + 13);
969
970 char *saveptr;
971 for (char *p = strtok_r (copy, ",", &saveptr);
972 p != NULL;
973 p = strtok_r (NULL, ",", &saveptr))
974 {
975 if (strcmp (p, "i386") == 0)
976 {
977 use_xml = 1;
978 break;
979 }
980 }
981
982 free (copy);
983 }
984 }
985 the_x86_target.update_xmltarget ();
986 }
987
988 /* Common for x86/x86-64. */
989
990 static struct regsets_info x86_regsets_info =
991 {
992 x86_regsets, /* regsets */
993 0, /* num_regsets */
994 NULL, /* disabled_regsets */
995 };
996
997 #ifdef __x86_64__
998 static struct regs_info amd64_linux_regs_info =
999 {
1000 NULL, /* regset_bitmap */
1001 NULL, /* usrregs_info */
1002 &x86_regsets_info
1003 };
1004 #endif
1005 static struct usrregs_info i386_linux_usrregs_info =
1006 {
1007 I386_NUM_REGS,
1008 i386_regmap,
1009 };
1010
1011 static struct regs_info i386_linux_regs_info =
1012 {
1013 NULL, /* regset_bitmap */
1014 &i386_linux_usrregs_info,
1015 &x86_regsets_info
1016 };
1017
1018 const regs_info *
1019 x86_target::get_regs_info ()
1020 {
1021 #ifdef __x86_64__
1022 if (is_64bit_tdesc ())
1023 return &amd64_linux_regs_info;
1024 else
1025 #endif
1026 return &i386_linux_regs_info;
1027 }
1028
1029 /* Initialize the target description for the architecture of the
1030 inferior. */
1031
1032 void
1033 x86_target::low_arch_setup ()
1034 {
1035 current_process ()->tdesc = x86_linux_read_description ();
1036 }
1037
1038 /* Fill *SYSNO with the number of the syscall that trapped. This should
1039 only be called if LWP got a SYSCALL_SIGTRAP. */
1040
1041 static void
1042 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1043 {
1044 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1045
1046 if (use_64bit)
1047 {
1048 long l_sysno;
1049
1050 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1051 *sysno = (int) l_sysno;
1052 }
1053 else
1054 collect_register_by_name (regcache, "orig_eax", sysno);
1055 }
1056
1057 static int
1058 x86_supports_tracepoints (void)
1059 {
1060 return 1;
1061 }
1062
1063 static void
1064 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1065 {
1066 target_write_memory (*to, buf, len);
1067 *to += len;
1068 }
1069
1070 static int
1071 push_opcode (unsigned char *buf, const char *op)
1072 {
1073 unsigned char *buf_org = buf;
1074
1075 while (1)
1076 {
1077 char *endptr;
1078 unsigned long ul = strtoul (op, &endptr, 16);
1079
1080 if (endptr == op)
1081 break;
1082
1083 *buf++ = ul;
1084 op = endptr;
1085 }
1086
1087 return buf - buf_org;
1088 }
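/* For example, push_opcode (buf, "48 83 ec 18") writes the four bytes
   0x48 0x83 0xec 0x18 ("sub $0x18,%rsp") into BUF and returns 4.  */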
1089
1090 #ifdef __x86_64__
1091
1092 /* Build a jump pad that saves registers and calls a collection
1093 function. Writes the jump instruction that jumps to the jump pad
1094 into JJUMPAD_INSN. The caller is responsible for writing it in at
1095 the tracepoint address. */
1096
1097 static int
1098 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1099 CORE_ADDR collector,
1100 CORE_ADDR lockaddr,
1101 ULONGEST orig_size,
1102 CORE_ADDR *jump_entry,
1103 CORE_ADDR *trampoline,
1104 ULONGEST *trampoline_size,
1105 unsigned char *jjump_pad_insn,
1106 ULONGEST *jjump_pad_insn_size,
1107 CORE_ADDR *adjusted_insn_addr,
1108 CORE_ADDR *adjusted_insn_addr_end,
1109 char *err)
1110 {
1111 unsigned char buf[40];
1112 int i, offset;
1113 int64_t loffset;
1114
1115 CORE_ADDR buildaddr = *jump_entry;
1116
1117 /* Build the jump pad. */
1118
1119 /* First, do tracepoint data collection. Save registers. */
1120 i = 0;
1121 /* Need to ensure stack pointer saved first. */
1122 buf[i++] = 0x54; /* push %rsp */
1123 buf[i++] = 0x55; /* push %rbp */
1124 buf[i++] = 0x57; /* push %rdi */
1125 buf[i++] = 0x56; /* push %rsi */
1126 buf[i++] = 0x52; /* push %rdx */
1127 buf[i++] = 0x51; /* push %rcx */
1128 buf[i++] = 0x53; /* push %rbx */
1129 buf[i++] = 0x50; /* push %rax */
1130 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1131 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1132 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1133 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1134 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1135 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1136 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1137 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1138 buf[i++] = 0x9c; /* pushfq */
1139 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1140 buf[i++] = 0xbf;
1141 memcpy (buf + i, &tpaddr, 8);
1142 i += 8;
1143 buf[i++] = 0x57; /* push %rdi */
1144 append_insns (&buildaddr, i, buf);
1145
1146 /* Stack space for the collecting_t object. */
1147 i = 0;
1148 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1149 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1150 memcpy (buf + i, &tpoint, 8);
1151 i += 8;
1152 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1153 i += push_opcode (&buf[i],
1154 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1155 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1156 append_insns (&buildaddr, i, buf);
1157
1158 /* spin-lock. */
1159 i = 0;
1160 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1161 memcpy (&buf[i], (void *) &lockaddr, 8);
1162 i += 8;
1163 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1164 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1165 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1166 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1167 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1168 append_insns (&buildaddr, i, buf);
1169
1170 /* Set up the gdb_collect call. */
1171 /* At this point, (stack pointer + 0x18) is the base of our saved
1172 register block. */
1173
1174 i = 0;
1175 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1176 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1177
1178 /* tpoint address may be 64-bit wide. */
1179 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1180 memcpy (buf + i, &tpoint, 8);
1181 i += 8;
1182 append_insns (&buildaddr, i, buf);
1183
1184 /* The collector function, being in the shared library, may be more
1185 than 31 bits away from the jump pad. */
1186 i = 0;
1187 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1188 memcpy (buf + i, &collector, 8);
1189 i += 8;
1190 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1191 append_insns (&buildaddr, i, buf);
1192
1193 /* Clear the spin-lock. */
1194 i = 0;
1195 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1196 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1197 memcpy (buf + i, &lockaddr, 8);
1198 i += 8;
1199 append_insns (&buildaddr, i, buf);
1200
1201 /* Remove the stack space that had been used for the collecting_t object. */
1202 i = 0;
1203 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1204 append_insns (&buildaddr, i, buf);
1205
1206 /* Restore register state. */
1207 i = 0;
1208 buf[i++] = 0x48; /* add $0x8,%rsp */
1209 buf[i++] = 0x83;
1210 buf[i++] = 0xc4;
1211 buf[i++] = 0x08;
1212 buf[i++] = 0x9d; /* popfq */
1213 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1214 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1215 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1216 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1217 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1218 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1219 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1220 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1221 buf[i++] = 0x58; /* pop %rax */
1222 buf[i++] = 0x5b; /* pop %rbx */
1223 buf[i++] = 0x59; /* pop %rcx */
1224 buf[i++] = 0x5a; /* pop %rdx */
1225 buf[i++] = 0x5e; /* pop %rsi */
1226 buf[i++] = 0x5f; /* pop %rdi */
1227 buf[i++] = 0x5d; /* pop %rbp */
1228 buf[i++] = 0x5c; /* pop %rsp */
1229 append_insns (&buildaddr, i, buf);
1230
1231 /* Now, adjust the original instruction to execute in the jump
1232 pad. */
1233 *adjusted_insn_addr = buildaddr;
1234 relocate_instruction (&buildaddr, tpaddr);
1235 *adjusted_insn_addr_end = buildaddr;
1236
1237 /* Finally, write a jump back to the program. */
1238
1239 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1240 if (loffset > INT_MAX || loffset < INT_MIN)
1241 {
1242 sprintf (err,
1243 "E.Jump back from jump pad too far from tracepoint "
1244 "(offset 0x%" PRIx64 " > int32).", loffset);
1245 return 1;
1246 }
1247
1248 offset = (int) loffset;
1249 memcpy (buf, jump_insn, sizeof (jump_insn));
1250 memcpy (buf + 1, &offset, 4);
1251 append_insns (&buildaddr, sizeof (jump_insn), buf);
1252
1253 /* The jump pad is now built. Wire in a jump to our jump pad. This
1254 is always done last (by our caller actually), so that we can
1255 install fast tracepoints with threads running. This relies on
1256 the agent's atomic write support. */
1257 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1258 if (loffset > INT_MAX || loffset < INT_MIN)
1259 {
1260 sprintf (err,
1261 "E.Jump pad too far from tracepoint "
1262 "(offset 0x%" PRIx64 " > int32).", loffset);
1263 return 1;
1264 }
1265
1266 offset = (int) loffset;
1267
1268 memcpy (buf, jump_insn, sizeof (jump_insn));
1269 memcpy (buf + 1, &offset, 4);
1270 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1271 *jjump_pad_insn_size = sizeof (jump_insn);
1272
1273 /* Return the end address of our pad. */
1274 *jump_entry = buildaddr;
1275
1276 return 0;
1277 }
1278
1279 #endif /* __x86_64__ */
1280
1281 /* Build a jump pad that saves registers and calls a collection
1282 function. Writes the jump instruction that jumps to the jump pad
1283 into JJUMPAD_INSN. The caller is responsible for writing it in at
1284 the tracepoint address. */
1285
1286 static int
1287 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1288 CORE_ADDR collector,
1289 CORE_ADDR lockaddr,
1290 ULONGEST orig_size,
1291 CORE_ADDR *jump_entry,
1292 CORE_ADDR *trampoline,
1293 ULONGEST *trampoline_size,
1294 unsigned char *jjump_pad_insn,
1295 ULONGEST *jjump_pad_insn_size,
1296 CORE_ADDR *adjusted_insn_addr,
1297 CORE_ADDR *adjusted_insn_addr_end,
1298 char *err)
1299 {
1300 unsigned char buf[0x100];
1301 int i, offset;
1302 CORE_ADDR buildaddr = *jump_entry;
1303
1304 /* Build the jump pad. */
1305
1306 /* First, do tracepoint data collection. Save registers. */
1307 i = 0;
1308 buf[i++] = 0x60; /* pushad */
1309 buf[i++] = 0x68; /* push tpaddr aka $pc */
1310 *((int *)(buf + i)) = (int) tpaddr;
1311 i += 4;
1312 buf[i++] = 0x9c; /* pushf */
1313 buf[i++] = 0x1e; /* push %ds */
1314 buf[i++] = 0x06; /* push %es */
1315 buf[i++] = 0x0f; /* push %fs */
1316 buf[i++] = 0xa0;
1317 buf[i++] = 0x0f; /* push %gs */
1318 buf[i++] = 0xa8;
1319 buf[i++] = 0x16; /* push %ss */
1320 buf[i++] = 0x0e; /* push %cs */
1321 append_insns (&buildaddr, i, buf);
1322
1323 /* Stack space for the collecting_t object. */
1324 i = 0;
1325 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1326
1327 /* Build the object. */
1328 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1329 memcpy (buf + i, &tpoint, 4);
1330 i += 4;
1331 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1332
1333 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1334 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1335 append_insns (&buildaddr, i, buf);
1336
1337 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1338 original i386. If we cared about that, this could use xchg instead. */
1339
1340 i = 0;
1341 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1342 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1343 %esp,<lockaddr> */
1344 memcpy (&buf[i], (void *) &lockaddr, 4);
1345 i += 4;
1346 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1347 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1348 append_insns (&buildaddr, i, buf);
1349
1350
1351 /* Set up arguments to the gdb_collect call. */
1352 i = 0;
1353 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1354 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1355 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1356 append_insns (&buildaddr, i, buf);
1357
1358 i = 0;
1359 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1360 append_insns (&buildaddr, i, buf);
1361
1362 i = 0;
1363 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1364 memcpy (&buf[i], (void *) &tpoint, 4);
1365 i += 4;
1366 append_insns (&buildaddr, i, buf);
1367
1368 buf[0] = 0xe8; /* call <reladdr> */
1369 offset = collector - (buildaddr + sizeof (jump_insn));
1370 memcpy (buf + 1, &offset, 4);
1371 append_insns (&buildaddr, 5, buf);
1372 /* Clean up after the call. */
1373 buf[0] = 0x83; /* add $0x8,%esp */
1374 buf[1] = 0xc4;
1375 buf[2] = 0x08;
1376 append_insns (&buildaddr, 3, buf);
1377
1378
1379 /* Clear the spin-lock. This would need the LOCK prefix on older
1380 broken archs. */
1381 i = 0;
1382 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1383 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1384 memcpy (buf + i, &lockaddr, 4);
1385 i += 4;
1386 append_insns (&buildaddr, i, buf);
1387
1388
1389 /* Remove the stack space that had been used for the collecting_t object. */
1390 i = 0;
1391 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1392 append_insns (&buildaddr, i, buf);
1393
1394 i = 0;
1395 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1396 buf[i++] = 0xc4;
1397 buf[i++] = 0x04;
1398 buf[i++] = 0x17; /* pop %ss */
1399 buf[i++] = 0x0f; /* pop %gs */
1400 buf[i++] = 0xa9;
1401 buf[i++] = 0x0f; /* pop %fs */
1402 buf[i++] = 0xa1;
1403 buf[i++] = 0x07; /* pop %es */
1404 buf[i++] = 0x1f; /* pop %ds */
1405 buf[i++] = 0x9d; /* popf */
1406 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1407 buf[i++] = 0xc4;
1408 buf[i++] = 0x04;
1409 buf[i++] = 0x61; /* popad */
1410 append_insns (&buildaddr, i, buf);
1411
1412 /* Now, adjust the original instruction to execute in the jump
1413 pad. */
1414 *adjusted_insn_addr = buildaddr;
1415 relocate_instruction (&buildaddr, tpaddr);
1416 *adjusted_insn_addr_end = buildaddr;
1417
1418 /* Write the jump back to the program. */
1419 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1420 memcpy (buf, jump_insn, sizeof (jump_insn));
1421 memcpy (buf + 1, &offset, 4);
1422 append_insns (&buildaddr, sizeof (jump_insn), buf);
1423
1424 /* The jump pad is now built. Wire in a jump to our jump pad. This
1425 is always done last (by our caller actually), so that we can
1426 install fast tracepoints with threads running. This relies on
1427 the agent's atomic write support. */
1428 if (orig_size == 4)
1429 {
1430 /* Create a trampoline. */
1431 *trampoline_size = sizeof (jump_insn);
1432 if (!claim_trampoline_space (*trampoline_size, trampoline))
1433 {
1434 /* No trampoline space available. */
1435 strcpy (err,
1436 "E.Cannot allocate trampoline space needed for fast "
1437 "tracepoints on 4-byte instructions.");
1438 return 1;
1439 }
1440
1441 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1442 memcpy (buf, jump_insn, sizeof (jump_insn));
1443 memcpy (buf + 1, &offset, 4);
1444 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1445
1446 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1447 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1448 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1449 memcpy (buf + 2, &offset, 2);
1450 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1451 *jjump_pad_insn_size = sizeof (small_jump_insn);
1452 }
1453 else
1454 {
1455 /* Else use a 32-bit relative jump instruction. */
1456 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1457 memcpy (buf, jump_insn, sizeof (jump_insn));
1458 memcpy (buf + 1, &offset, 4);
1459 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1460 *jjump_pad_insn_size = sizeof (jump_insn);
1461 }
1462
1463 /* Return the end address of our pad. */
1464 *jump_entry = buildaddr;
1465
1466 return 0;
1467 }
1468
1469 static int
1470 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1471 CORE_ADDR collector,
1472 CORE_ADDR lockaddr,
1473 ULONGEST orig_size,
1474 CORE_ADDR *jump_entry,
1475 CORE_ADDR *trampoline,
1476 ULONGEST *trampoline_size,
1477 unsigned char *jjump_pad_insn,
1478 ULONGEST *jjump_pad_insn_size,
1479 CORE_ADDR *adjusted_insn_addr,
1480 CORE_ADDR *adjusted_insn_addr_end,
1481 char *err)
1482 {
1483 #ifdef __x86_64__
1484 if (is_64bit_tdesc ())
1485 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1486 collector, lockaddr,
1487 orig_size, jump_entry,
1488 trampoline, trampoline_size,
1489 jjump_pad_insn,
1490 jjump_pad_insn_size,
1491 adjusted_insn_addr,
1492 adjusted_insn_addr_end,
1493 err);
1494 #endif
1495
1496 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1497 collector, lockaddr,
1498 orig_size, jump_entry,
1499 trampoline, trampoline_size,
1500 jjump_pad_insn,
1501 jjump_pad_insn_size,
1502 adjusted_insn_addr,
1503 adjusted_insn_addr_end,
1504 err);
1505 }
1506
1507 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1508 architectures. */
1509
1510 static int
1511 x86_get_min_fast_tracepoint_insn_len (void)
1512 {
1513 static int warned_about_fast_tracepoints = 0;
1514
1515 #ifdef __x86_64__
1516 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1517 used for fast tracepoints. */
1518 if (is_64bit_tdesc ())
1519 return 5;
1520 #endif
1521
1522 if (agent_loaded_p ())
1523 {
1524 char errbuf[IPA_BUFSIZ];
1525
1526 errbuf[0] = '\0';
1527
1528 /* On x86, if trampolines are available, then 4-byte jump instructions
1529 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1530 with a 4-byte offset are used instead. */
1531 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1532 return 4;
1533 else
1534 {
1535 /* GDB has no channel to explain to the user why a shorter fast
1536 tracepoint is not possible, but at least make GDBserver
1537 mention that something has gone awry. */
1538 if (!warned_about_fast_tracepoints)
1539 {
1540 warning ("4-byte fast tracepoints not available; %s", errbuf);
1541 warned_about_fast_tracepoints = 1;
1542 }
1543 return 5;
1544 }
1545 }
1546 else
1547 {
1548 /* Indicate that the minimum length is currently unknown since the IPA
1549 has not loaded yet. */
1550 return 0;
1551 }
1552 }
1553
1554 static void
1555 add_insns (unsigned char *start, int len)
1556 {
1557 CORE_ADDR buildaddr = current_insn_ptr;
1558
1559 if (debug_threads)
1560 debug_printf ("Adding %d bytes of insn at %s\n",
1561 len, paddress (buildaddr));
1562
1563 append_insns (&buildaddr, len, start);
1564 current_insn_ptr = buildaddr;
1565 }
1566
1567 /* Our general strategy for emitting code is to avoid specifying raw
1568 bytes whenever possible, and instead copy a block of inline asm
1569 that is embedded in the function. This is a little messy, because
1570 we need to keep the compiler from discarding what looks like dead
1571 code, plus suppress various warnings. */
1572
1573 #define EMIT_ASM(NAME, INSNS) \
1574 do \
1575 { \
1576 extern unsigned char start_ ## NAME, end_ ## NAME; \
1577 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1578 __asm__ ("jmp end_" #NAME "\n" \
1579 "\t" "start_" #NAME ":" \
1580 "\t" INSNS "\n" \
1581 "\t" "end_" #NAME ":"); \
1582 } while (0)
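/* A rough sketch of what this does, for a hypothetical
   EMIT_ASM (sample, "push %rax"): the inline asm places

       jmp end_sample
     start_sample:
       push %rax
     end_sample:

   in gdbserver's own text section, and add_insns then copies the bytes
   between the two labels (here just 0x50) into the inferior's scratch
   buffer at current_insn_ptr.  The leading jmp only ensures those bytes
   are never executed in place.  */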
1583
1584 #ifdef __x86_64__
1585
1586 #define EMIT_ASM32(NAME,INSNS) \
1587 do \
1588 { \
1589 extern unsigned char start_ ## NAME, end_ ## NAME; \
1590 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1591 __asm__ (".code32\n" \
1592 "\t" "jmp end_" #NAME "\n" \
1593 "\t" "start_" #NAME ":\n" \
1594 "\t" INSNS "\n" \
1595 "\t" "end_" #NAME ":\n" \
1596 ".code64\n"); \
1597 } while (0)
1598
1599 #else
1600
1601 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1602
1603 #endif
1604
1605 #ifdef __x86_64__
1606
1607 static void
1608 amd64_emit_prologue (void)
1609 {
1610 EMIT_ASM (amd64_prologue,
1611 "pushq %rbp\n\t"
1612 "movq %rsp,%rbp\n\t"
1613 "sub $0x20,%rsp\n\t"
1614 "movq %rdi,-8(%rbp)\n\t"
1615 "movq %rsi,-16(%rbp)");
1616 }
1617
1618
1619 static void
1620 amd64_emit_epilogue (void)
1621 {
1622 EMIT_ASM (amd64_epilogue,
1623 "movq -16(%rbp),%rdi\n\t"
1624 "movq %rax,(%rdi)\n\t"
1625 "xor %rax,%rax\n\t"
1626 "leave\n\t"
1627 "ret");
1628 }
1629
1630 static void
1631 amd64_emit_add (void)
1632 {
1633 EMIT_ASM (amd64_add,
1634 "add (%rsp),%rax\n\t"
1635 "lea 0x8(%rsp),%rsp");
1636 }
1637
1638 static void
1639 amd64_emit_sub (void)
1640 {
1641 EMIT_ASM (amd64_sub,
1642 "sub %rax,(%rsp)\n\t"
1643 "pop %rax");
1644 }
1645
1646 static void
1647 amd64_emit_mul (void)
1648 {
1649 emit_error = 1;
1650 }
1651
1652 static void
1653 amd64_emit_lsh (void)
1654 {
1655 emit_error = 1;
1656 }
1657
1658 static void
1659 amd64_emit_rsh_signed (void)
1660 {
1661 emit_error = 1;
1662 }
1663
1664 static void
1665 amd64_emit_rsh_unsigned (void)
1666 {
1667 emit_error = 1;
1668 }
1669
1670 static void
1671 amd64_emit_ext (int arg)
1672 {
1673 switch (arg)
1674 {
1675 case 8:
1676 EMIT_ASM (amd64_ext_8,
1677 "cbtw\n\t"
1678 "cwtl\n\t"
1679 "cltq");
1680 break;
1681 case 16:
1682 EMIT_ASM (amd64_ext_16,
1683 "cwtl\n\t"
1684 "cltq");
1685 break;
1686 case 32:
1687 EMIT_ASM (amd64_ext_32,
1688 "cltq");
1689 break;
1690 default:
1691 emit_error = 1;
1692 }
1693 }
1694
1695 static void
1696 amd64_emit_log_not (void)
1697 {
1698 EMIT_ASM (amd64_log_not,
1699 "test %rax,%rax\n\t"
1700 "sete %cl\n\t"
1701 "movzbq %cl,%rax");
1702 }
1703
1704 static void
1705 amd64_emit_bit_and (void)
1706 {
1707 EMIT_ASM (amd64_and,
1708 "and (%rsp),%rax\n\t"
1709 "lea 0x8(%rsp),%rsp");
1710 }
1711
1712 static void
1713 amd64_emit_bit_or (void)
1714 {
1715 EMIT_ASM (amd64_or,
1716 "or (%rsp),%rax\n\t"
1717 "lea 0x8(%rsp),%rsp");
1718 }
1719
1720 static void
1721 amd64_emit_bit_xor (void)
1722 {
1723 EMIT_ASM (amd64_xor,
1724 "xor (%rsp),%rax\n\t"
1725 "lea 0x8(%rsp),%rsp");
1726 }
1727
1728 static void
1729 amd64_emit_bit_not (void)
1730 {
1731 EMIT_ASM (amd64_bit_not,
1732 "xorq $0xffffffffffffffff,%rax");
1733 }
1734
1735 static void
1736 amd64_emit_equal (void)
1737 {
1738 EMIT_ASM (amd64_equal,
1739 "cmp %rax,(%rsp)\n\t"
1740 "je .Lamd64_equal_true\n\t"
1741 "xor %rax,%rax\n\t"
1742 "jmp .Lamd64_equal_end\n\t"
1743 ".Lamd64_equal_true:\n\t"
1744 "mov $0x1,%rax\n\t"
1745 ".Lamd64_equal_end:\n\t"
1746 "lea 0x8(%rsp),%rsp");
1747 }
1748
1749 static void
1750 amd64_emit_less_signed (void)
1751 {
1752 EMIT_ASM (amd64_less_signed,
1753 "cmp %rax,(%rsp)\n\t"
1754 "jl .Lamd64_less_signed_true\n\t"
1755 "xor %rax,%rax\n\t"
1756 "jmp .Lamd64_less_signed_end\n\t"
1757 ".Lamd64_less_signed_true:\n\t"
1758 "mov $1,%rax\n\t"
1759 ".Lamd64_less_signed_end:\n\t"
1760 "lea 0x8(%rsp),%rsp");
1761 }
1762
1763 static void
1764 amd64_emit_less_unsigned (void)
1765 {
1766 EMIT_ASM (amd64_less_unsigned,
1767 "cmp %rax,(%rsp)\n\t"
1768 "jb .Lamd64_less_unsigned_true\n\t"
1769 "xor %rax,%rax\n\t"
1770 "jmp .Lamd64_less_unsigned_end\n\t"
1771 ".Lamd64_less_unsigned_true:\n\t"
1772 "mov $1,%rax\n\t"
1773 ".Lamd64_less_unsigned_end:\n\t"
1774 "lea 0x8(%rsp),%rsp");
1775 }
1776
1777 static void
1778 amd64_emit_ref (int size)
1779 {
1780 switch (size)
1781 {
1782 case 1:
1783 EMIT_ASM (amd64_ref1,
1784 "movb (%rax),%al");
1785 break;
1786 case 2:
1787 EMIT_ASM (amd64_ref2,
1788 "movw (%rax),%ax");
1789 break;
1790 case 4:
1791 EMIT_ASM (amd64_ref4,
1792 "movl (%rax),%eax");
1793 break;
1794 case 8:
1795 EMIT_ASM (amd64_ref8,
1796 "movq (%rax),%rax");
1797 break;
1798 }
1799 }
1800
1801 static void
1802 amd64_emit_if_goto (int *offset_p, int *size_p)
1803 {
1804 EMIT_ASM (amd64_if_goto,
1805 "mov %rax,%rcx\n\t"
1806 "pop %rax\n\t"
1807 "cmp $0,%rcx\n\t"
1808 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1809 if (offset_p)
1810 *offset_p = 10;
1811 if (size_p)
1812 *size_p = 4;
1813 }
1814
1815 static void
1816 amd64_emit_goto (int *offset_p, int *size_p)
1817 {
1818 EMIT_ASM (amd64_goto,
1819 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1820 if (offset_p)
1821 *offset_p = 1;
1822 if (size_p)
1823 *size_p = 4;
1824 }
1825
1826 static void
1827 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1828 {
1829 int diff = (to - (from + size));
1830 unsigned char buf[sizeof (int)];
1831
1832 if (size != 4)
1833 {
1834 emit_error = 1;
1835 return;
1836 }
1837
1838 memcpy (buf, &diff, sizeof (int));
1839 target_write_memory (from, buf, sizeof (int));
1840 }
1841
1842 static void
1843 amd64_emit_const (LONGEST num)
1844 {
1845 unsigned char buf[16];
1846 int i;
1847 CORE_ADDR buildaddr = current_insn_ptr;
1848
1849 i = 0;
1850 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1851 memcpy (&buf[i], &num, sizeof (num));
1852 i += 8;
1853 append_insns (&buildaddr, i, buf);
1854 current_insn_ptr = buildaddr;
1855 }
1856
1857 static void
1858 amd64_emit_call (CORE_ADDR fn)
1859 {
1860 unsigned char buf[16];
1861 int i;
1862 CORE_ADDR buildaddr;
1863 LONGEST offset64;
1864
1865 /* The destination function, being in the shared library, may be more
1866 than 31 bits away from the compiled code pad. */
1867
1868 buildaddr = current_insn_ptr;
1869
1870 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1871
1872 i = 0;
1873
1874 if (offset64 > INT_MAX || offset64 < INT_MIN)
1875 {
1876 /* Offset is too large for a direct call, so call through a register
1877 instead. Use %r10: since it is call-clobbered, we don't have to
1878 push/pop it. */
1879 buf[i++] = 0x48; /* mov $fn,%r10 */
1880 buf[i++] = 0xba;
1881 memcpy (buf + i, &fn, 8);
1882 i += 8;
1883 buf[i++] = 0xff; /* callq *%r10 */
1884 buf[i++] = 0xd2;
1885 }
1886 else
1887 {
1888 int offset32 = offset64; /* we know we can't overflow here. */
1889
1890 buf[i++] = 0xe8; /* call <reladdr> */
1891 memcpy (buf + i, &offset32, 4);
1892 i += 4;
1893 }
1894
1895 append_insns (&buildaddr, i, buf);
1896 current_insn_ptr = buildaddr;
1897 }
1898
1899 static void
1900 amd64_emit_reg (int reg)
1901 {
1902 unsigned char buf[16];
1903 int i;
1904 CORE_ADDR buildaddr;
1905
1906 /* Assume raw_regs is still in %rdi. */
1907 buildaddr = current_insn_ptr;
1908 i = 0;
1909 buf[i++] = 0xbe; /* mov $<n>,%esi */
1910 memcpy (&buf[i], &reg, sizeof (reg));
1911 i += 4;
1912 append_insns (&buildaddr, i, buf);
1913 current_insn_ptr = buildaddr;
1914 amd64_emit_call (get_raw_reg_func_addr ());
1915 }
1916
1917 static void
1918 amd64_emit_pop (void)
1919 {
1920 EMIT_ASM (amd64_pop,
1921 "pop %rax");
1922 }
1923
1924 static void
1925 amd64_emit_stack_flush (void)
1926 {
1927 EMIT_ASM (amd64_stack_flush,
1928 "push %rax");
1929 }
1930
1931 static void
1932 amd64_emit_zero_ext (int arg)
1933 {
1934 switch (arg)
1935 {
1936 case 8:
1937 EMIT_ASM (amd64_zero_ext_8,
1938 "and $0xff,%rax");
1939 break;
1940 case 16:
1941 EMIT_ASM (amd64_zero_ext_16,
1942 "and $0xffff,%rax");
1943 break;
1944 case 32:
1945 EMIT_ASM (amd64_zero_ext_32,
1946 "mov $0xffffffff,%rcx\n\t"
1947 "and %rcx,%rax");
1948 break;
1949 default:
1950 emit_error = 1;
1951 }
1952 }
1953
1954 static void
1955 amd64_emit_swap (void)
1956 {
1957 EMIT_ASM (amd64_swap,
1958 "mov %rax,%rcx\n\t"
1959 "pop %rax\n\t"
1960 "push %rcx");
1961 }
1962
1963 static void
1964 amd64_emit_stack_adjust (int n)
1965 {
1966 unsigned char buf[16];
1967 int i;
1968 CORE_ADDR buildaddr = current_insn_ptr;
1969
1970 i = 0;
1971 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1972 buf[i++] = 0x8d;
1973 buf[i++] = 0x64;
1974 buf[i++] = 0x24;
1975 /* This only handles adjustments up to 16, but we don't expect any more. */
1976 buf[i++] = n * 8;
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979 }
1980
1981 /* FN's prototype is `LONGEST(*fn)(int)'. */
1982
1983 static void
1984 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1985 {
1986 unsigned char buf[16];
1987 int i;
1988 CORE_ADDR buildaddr;
1989
1990 buildaddr = current_insn_ptr;
1991 i = 0;
1992 buf[i++] = 0xbf; /* movl $<n>,%edi */
1993 memcpy (&buf[i], &arg1, sizeof (arg1));
1994 i += 4;
1995 append_insns (&buildaddr, i, buf);
1996 current_insn_ptr = buildaddr;
1997 amd64_emit_call (fn);
1998 }
1999
2000 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2001
2002 static void
2003 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2004 {
2005 unsigned char buf[16];
2006 int i;
2007 CORE_ADDR buildaddr;
2008
2009 buildaddr = current_insn_ptr;
2010 i = 0;
2011 buf[i++] = 0xbf; /* movl $<n>,%edi */
2012 memcpy (&buf[i], &arg1, sizeof (arg1));
2013 i += 4;
2014 append_insns (&buildaddr, i, buf);
2015 current_insn_ptr = buildaddr;
2016 EMIT_ASM (amd64_void_call_2_a,
2017 /* Save away a copy of the stack top. */
2018 "push %rax\n\t"
2019 /* Also pass top as the second argument. */
2020 "mov %rax,%rsi");
2021 amd64_emit_call (fn);
2022 EMIT_ASM (amd64_void_call_2_b,
2023 /* Restore the stack top, %rax may have been trashed. */
2024 "pop %rax");
2025 }
2026
2027 static void
2028 amd64_emit_eq_goto (int *offset_p, int *size_p)
2029 {
2030 EMIT_ASM (amd64_eq,
2031 "cmp %rax,(%rsp)\n\t"
2032 "jne .Lamd64_eq_fallthru\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2034 "pop %rax\n\t"
2035 /* jmp, but don't trust the assembler to choose the right jump */
2036 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2037 ".Lamd64_eq_fallthru:\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2039 "pop %rax");
2040
2041 if (offset_p)
2042 *offset_p = 13;
2043 if (size_p)
2044 *size_p = 4;
2045 }
2046
2047 static void
2048 amd64_emit_ne_goto (int *offset_p, int *size_p)
2049 {
2050 EMIT_ASM (amd64_ne,
2051 "cmp %rax,(%rsp)\n\t"
2052 "je .Lamd64_ne_fallthru\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2054 "pop %rax\n\t"
2055 /* jmp, but don't trust the assembler to choose the right jump */
2056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2057 ".Lamd64_ne_fallthru:\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2059 "pop %rax");
2060
2061 if (offset_p)
2062 *offset_p = 13;
2063 if (size_p)
2064 *size_p = 4;
2065 }
2066
2067 static void
2068 amd64_emit_lt_goto (int *offset_p, int *size_p)
2069 {
2070 EMIT_ASM (amd64_lt,
2071 "cmp %rax,(%rsp)\n\t"
2072 "jnl .Lamd64_lt_fallthru\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2074 "pop %rax\n\t"
2075 /* jmp, but don't trust the assembler to choose the right jump */
2076 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2077 ".Lamd64_lt_fallthru:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax");
2080
2081 if (offset_p)
2082 *offset_p = 13;
2083 if (size_p)
2084 *size_p = 4;
2085 }
2086
2087 static void
2088 amd64_emit_le_goto (int *offset_p, int *size_p)
2089 {
2090 EMIT_ASM (amd64_le,
2091 "cmp %rax,(%rsp)\n\t"
2092 "jnle .Lamd64_le_fallthru\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2094 "pop %rax\n\t"
2095 /* jmp, but don't trust the assembler to choose the right jump */
2096 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2097 ".Lamd64_le_fallthru:\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax");
2100
2101 if (offset_p)
2102 *offset_p = 13;
2103 if (size_p)
2104 *size_p = 4;
2105 }
2106
2107 static void
2108 amd64_emit_gt_goto (int *offset_p, int *size_p)
2109 {
2110 EMIT_ASM (amd64_gt,
2111 "cmp %rax,(%rsp)\n\t"
2112 "jng .Lamd64_gt_fallthru\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax\n\t"
2115 /* jmp, but don't trust the assembler to choose the right jump */
2116 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2117 ".Lamd64_gt_fallthru:\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2119 "pop %rax");
2120
2121 if (offset_p)
2122 *offset_p = 13;
2123 if (size_p)
2124 *size_p = 4;
2125 }
2126
2127 static void
2128 amd64_emit_ge_goto (int *offset_p, int *size_p)
2129 {
2130 EMIT_ASM (amd64_ge,
2131 "cmp %rax,(%rsp)\n\t"
2132 "jnge .Lamd64_ge_fallthru\n\t"
2133 ".Lamd64_ge_jump:\n\t"
2134 "lea 0x8(%rsp),%rsp\n\t"
2135 "pop %rax\n\t"
2136 /* jmp, but don't trust the assembler to choose the right jump */
2137 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2138 ".Lamd64_ge_fallthru:\n\t"
2139 "lea 0x8(%rsp),%rsp\n\t"
2140 "pop %rax");
2141
2142 if (offset_p)
2143 *offset_p = 13;
2144 if (size_p)
2145 *size_p = 4;
2146 }
2147
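/* The "emit_ops" vector used to compile agent expressions into native
   amd64 code.  */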
2148 struct emit_ops amd64_emit_ops =
2149 {
2150 amd64_emit_prologue,
2151 amd64_emit_epilogue,
2152 amd64_emit_add,
2153 amd64_emit_sub,
2154 amd64_emit_mul,
2155 amd64_emit_lsh,
2156 amd64_emit_rsh_signed,
2157 amd64_emit_rsh_unsigned,
2158 amd64_emit_ext,
2159 amd64_emit_log_not,
2160 amd64_emit_bit_and,
2161 amd64_emit_bit_or,
2162 amd64_emit_bit_xor,
2163 amd64_emit_bit_not,
2164 amd64_emit_equal,
2165 amd64_emit_less_signed,
2166 amd64_emit_less_unsigned,
2167 amd64_emit_ref,
2168 amd64_emit_if_goto,
2169 amd64_emit_goto,
2170 amd64_write_goto_address,
2171 amd64_emit_const,
2172 amd64_emit_call,
2173 amd64_emit_reg,
2174 amd64_emit_pop,
2175 amd64_emit_stack_flush,
2176 amd64_emit_zero_ext,
2177 amd64_emit_swap,
2178 amd64_emit_stack_adjust,
2179 amd64_emit_int_call_1,
2180 amd64_emit_void_call_2,
2181 amd64_emit_eq_goto,
2182 amd64_emit_ne_goto,
2183 amd64_emit_lt_goto,
2184 amd64_emit_le_goto,
2185 amd64_emit_gt_goto,
2186 amd64_emit_ge_goto
2187 };
2188
2189 #endif /* __x86_64__ */
2190
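/* The i386 counterparts of the emitters above.  64-bit values are kept
   split between %eax (low half) and %ebx (high half).  */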
2191 static void
2192 i386_emit_prologue (void)
2193 {
2194 EMIT_ASM32 (i386_prologue,
2195 "push %ebp\n\t"
2196 "mov %esp,%ebp\n\t"
2197 "push %ebx");
2198 /* At this point, the raw regs base address is at 8(%ebp), and the
2199 value pointer is at 12(%ebp). */
2200 }
2201
2202 static void
2203 i386_emit_epilogue (void)
2204 {
2205 EMIT_ASM32 (i386_epilogue,
2206 "mov 12(%ebp),%ecx\n\t"
2207 "mov %eax,(%ecx)\n\t"
2208 "mov %ebx,0x4(%ecx)\n\t"
2209 "xor %eax,%eax\n\t"
2210 "pop %ebx\n\t"
2211 "pop %ebp\n\t"
2212 "ret");
2213 }
2214
2215 static void
2216 i386_emit_add (void)
2217 {
2218 EMIT_ASM32 (i386_add,
2219 "add (%esp),%eax\n\t"
2220 "adc 0x4(%esp),%ebx\n\t"
2221 "lea 0x8(%esp),%esp");
2222 }
2223
2224 static void
2225 i386_emit_sub (void)
2226 {
2227 EMIT_ASM32 (i386_sub,
2228 "subl %eax,(%esp)\n\t"
2229 "sbbl %ebx,4(%esp)\n\t"
2230 "pop %eax\n\t"
2231 "pop %ebx\n\t");
2232 }
2233
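/* 64-bit multiply and shifts are not implemented for i386; setting
   emit_error makes the caller give up on compiling this expression.  */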
2234 static void
2235 i386_emit_mul (void)
2236 {
2237 emit_error = 1;
2238 }
2239
2240 static void
2241 i386_emit_lsh (void)
2242 {
2243 emit_error = 1;
2244 }
2245
2246 static void
2247 i386_emit_rsh_signed (void)
2248 {
2249 emit_error = 1;
2250 }
2251
2252 static void
2253 i386_emit_rsh_unsigned (void)
2254 {
2255 emit_error = 1;
2256 }
2257
2258 static void
2259 i386_emit_ext (int arg)
2260 {
2261 switch (arg)
2262 {
2263 case 8:
2264 EMIT_ASM32 (i386_ext_8,
2265 "cbtw\n\t"
2266 "cwtl\n\t"
2267 "movl %eax,%ebx\n\t"
2268 "sarl $31,%ebx");
2269 break;
2270 case 16:
2271 EMIT_ASM32 (i386_ext_16,
2272 "cwtl\n\t"
2273 "movl %eax,%ebx\n\t"
2274 "sarl $31,%ebx");
2275 break;
2276 case 32:
2277 EMIT_ASM32 (i386_ext_32,
2278 "movl %eax,%ebx\n\t"
2279 "sarl $31,%ebx");
2280 break;
2281 default:
2282 emit_error = 1;
2283 }
2284 }
2285
2286 static void
2287 i386_emit_log_not (void)
2288 {
2289 EMIT_ASM32 (i386_log_not,
2290 "or %ebx,%eax\n\t"
2291 "test %eax,%eax\n\t"
2292 "sete %cl\n\t"
2293 "xor %ebx,%ebx\n\t"
2294 "movzbl %cl,%eax");
2295 }
2296
2297 static void
2298 i386_emit_bit_and (void)
2299 {
2300 EMIT_ASM32 (i386_and,
2301 "and (%esp),%eax\n\t"
2302 "and 0x4(%esp),%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2304 }
2305
2306 static void
2307 i386_emit_bit_or (void)
2308 {
2309 EMIT_ASM32 (i386_or,
2310 "or (%esp),%eax\n\t"
2311 "or 0x4(%esp),%ebx\n\t"
2312 "lea 0x8(%esp),%esp");
2313 }
2314
2315 static void
2316 i386_emit_bit_xor (void)
2317 {
2318 EMIT_ASM32 (i386_xor,
2319 "xor (%esp),%eax\n\t"
2320 "xor 0x4(%esp),%ebx\n\t"
2321 "lea 0x8(%esp),%esp");
2322 }
2323
2324 static void
2325 i386_emit_bit_not (void)
2326 {
2327 EMIT_ASM32 (i386_bit_not,
2328 "xor $0xffffffff,%eax\n\t"
2329 "xor $0xffffffff,%ebx\n\t");
2330 }
2331
2332 static void
2333 i386_emit_equal (void)
2334 {
2335 EMIT_ASM32 (i386_equal,
2336 "cmpl %ebx,4(%esp)\n\t"
2337 "jne .Li386_equal_false\n\t"
2338 "cmpl %eax,(%esp)\n\t"
2339 "je .Li386_equal_true\n\t"
2340 ".Li386_equal_false:\n\t"
2341 "xor %eax,%eax\n\t"
2342 "jmp .Li386_equal_end\n\t"
2343 ".Li386_equal_true:\n\t"
2344 "mov $1,%eax\n\t"
2345 ".Li386_equal_end:\n\t"
2346 "xor %ebx,%ebx\n\t"
2347 "lea 0x8(%esp),%esp");
2348 }
2349
2350 static void
2351 i386_emit_less_signed (void)
2352 {
2353 EMIT_ASM32 (i386_less_signed,
2354 "cmpl %ebx,4(%esp)\n\t"
2355 "jl .Li386_less_signed_true\n\t"
2356 "jne .Li386_less_signed_false\n\t"
2357 "cmpl %eax,(%esp)\n\t"
2358 "jl .Li386_less_signed_true\n\t"
2359 ".Li386_less_signed_false:\n\t"
2360 "xor %eax,%eax\n\t"
2361 "jmp .Li386_less_signed_end\n\t"
2362 ".Li386_less_signed_true:\n\t"
2363 "mov $1,%eax\n\t"
2364 ".Li386_less_signed_end:\n\t"
2365 "xor %ebx,%ebx\n\t"
2366 "lea 0x8(%esp),%esp");
2367 }
2368
2369 static void
2370 i386_emit_less_unsigned (void)
2371 {
2372 EMIT_ASM32 (i386_less_unsigned,
2373 "cmpl %ebx,4(%esp)\n\t"
2374 "jb .Li386_less_unsigned_true\n\t"
2375 "jne .Li386_less_unsigned_false\n\t"
2376 "cmpl %eax,(%esp)\n\t"
2377 "jb .Li386_less_unsigned_true\n\t"
2378 ".Li386_less_unsigned_false:\n\t"
2379 "xor %eax,%eax\n\t"
2380 "jmp .Li386_less_unsigned_end\n\t"
2381 ".Li386_less_unsigned_true:\n\t"
2382 "mov $1,%eax\n\t"
2383 ".Li386_less_unsigned_end:\n\t"
2384 "xor %ebx,%ebx\n\t"
2385 "lea 0x8(%esp),%esp");
2386 }
2387
2388 static void
2389 i386_emit_ref (int size)
2390 {
2391 switch (size)
2392 {
2393 case 1:
2394 EMIT_ASM32 (i386_ref1,
2395 "movb (%eax),%al");
2396 break;
2397 case 2:
2398 EMIT_ASM32 (i386_ref2,
2399 "movw (%eax),%ax");
2400 break;
2401 case 4:
2402 EMIT_ASM32 (i386_ref4,
2403 "movl (%eax),%eax");
2404 break;
2405 case 8:
2406 EMIT_ASM32 (i386_ref8,
2407 "movl 4(%eax),%ebx\n\t"
2408 "movl (%eax),%eax");
2409 break;
2410 }
2411 }
2412
2413 static void
2414 i386_emit_if_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM32 (i386_if_goto,
2417 "mov %eax,%ecx\n\t"
2418 "or %ebx,%ecx\n\t"
2419 "pop %eax\n\t"
2420 "pop %ebx\n\t"
2421 "cmpl $0,%ecx\n\t"
2422 /* Don't trust the assembler to choose the right jump */
2423 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2424
2425 if (offset_p)
2426 *offset_p = 11; /* be sure that this matches the sequence above */
2427 if (size_p)
2428 *size_p = 4;
2429 }
2430
2431 static void
2432 i386_emit_goto (int *offset_p, int *size_p)
2433 {
2434 EMIT_ASM32 (i386_goto,
2435 /* Don't trust the assembler to choose the right jump */
2436 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2437 if (offset_p)
2438 *offset_p = 1;
2439 if (size_p)
2440 *size_p = 4;
2441 }
2442
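/* Patch the 4-byte PC-relative displacement at FROM so that the jump
   emitted earlier lands at TO.  */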
2443 static void
2444 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2445 {
2446 int diff = (to - (from + size));
2447 unsigned char buf[sizeof (int)];
2448
2449 /* We're only doing 4-byte sizes at the moment. */
2450 if (size != 4)
2451 {
2452 emit_error = 1;
2453 return;
2454 }
2455
2456 memcpy (buf, &diff, sizeof (int));
2457 target_write_memory (from, buf, sizeof (int));
2458 }
2459
2460 static void
2461 i386_emit_const (LONGEST num)
2462 {
2463 unsigned char buf[16];
2464 int i, hi, lo;
2465 CORE_ADDR buildaddr = current_insn_ptr;
2466
2467 i = 0;
2468 buf[i++] = 0xb8; /* mov $<n>,%eax */
2469 lo = num & 0xffffffff;
2470 memcpy (&buf[i], &lo, sizeof (lo));
2471 i += 4;
2472 hi = ((num >> 32) & 0xffffffff);
2473 if (hi)
2474 {
2475 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2476 memcpy (&buf[i], &hi, sizeof (hi));
2477 i += 4;
2478 }
2479 else
2480 {
2481 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2482 }
2483 append_insns (&buildaddr, i, buf);
2484 current_insn_ptr = buildaddr;
2485 }
2486
2487 static void
2488 i386_emit_call (CORE_ADDR fn)
2489 {
2490 unsigned char buf[16];
2491 int i, offset;
2492 CORE_ADDR buildaddr;
2493
2494 buildaddr = current_insn_ptr;
2495 i = 0;
2496 buf[i++] = 0xe8; /* call <reladdr> */
2497 offset = ((int) fn) - (buildaddr + 5);
2498 memcpy (buf + 1, &offset, 4);
2499 append_insns (&buildaddr, 5, buf);
2500 current_insn_ptr = buildaddr;
2501 }
2502
2503 static void
2504 i386_emit_reg (int reg)
2505 {
2506 unsigned char buf[16];
2507 int i;
2508 CORE_ADDR buildaddr;
2509
2510 EMIT_ASM32 (i386_reg_a,
2511 "sub $0x8,%esp");
2512 buildaddr = current_insn_ptr;
2513 i = 0;
2514 buf[i++] = 0xb8; /* mov $<n>,%eax */
2515 memcpy (&buf[i], &reg, sizeof (reg));
2516 i += 4;
2517 append_insns (&buildaddr, i, buf);
2518 current_insn_ptr = buildaddr;
2519 EMIT_ASM32 (i386_reg_b,
2520 "mov %eax,4(%esp)\n\t"
2521 "mov 8(%ebp),%eax\n\t"
2522 "mov %eax,(%esp)");
2523 i386_emit_call (get_raw_reg_func_addr ());
2524 EMIT_ASM32 (i386_reg_c,
2525 "xor %ebx,%ebx\n\t"
2526 "lea 0x8(%esp),%esp");
2527 }
2528
2529 static void
2530 i386_emit_pop (void)
2531 {
2532 EMIT_ASM32 (i386_pop,
2533 "pop %eax\n\t"
2534 "pop %ebx");
2535 }
2536
2537 static void
2538 i386_emit_stack_flush (void)
2539 {
2540 EMIT_ASM32 (i386_stack_flush,
2541 "push %ebx\n\t"
2542 "push %eax");
2543 }
2544
2545 static void
2546 i386_emit_zero_ext (int arg)
2547 {
2548 switch (arg)
2549 {
2550 case 8:
2551 EMIT_ASM32 (i386_zero_ext_8,
2552 "and $0xff,%eax\n\t"
2553 "xor %ebx,%ebx");
2554 break;
2555 case 16:
2556 EMIT_ASM32 (i386_zero_ext_16,
2557 "and $0xffff,%eax\n\t"
2558 "xor %ebx,%ebx");
2559 break;
2560 case 32:
2561 EMIT_ASM32 (i386_zero_ext_32,
2562 "xor %ebx,%ebx");
2563 break;
2564 default:
2565 emit_error = 1;
2566 }
2567 }
2568
2569 static void
2570 i386_emit_swap (void)
2571 {
2572 EMIT_ASM32 (i386_swap,
2573 "mov %eax,%ecx\n\t"
2574 "mov %ebx,%edx\n\t"
2575 "pop %eax\n\t"
2576 "pop %ebx\n\t"
2577 "push %edx\n\t"
2578 "push %ecx");
2579 }
2580
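/* Drop N entries (N * 8 bytes each) from the run-time stack by
   adjusting %esp.  */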
2581 static void
2582 i386_emit_stack_adjust (int n)
2583 {
2584 unsigned char buf[16];
2585 int i;
2586 CORE_ADDR buildaddr = current_insn_ptr;
2587
2588 i = 0;
2589 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2590 buf[i++] = 0x64;
2591 buf[i++] = 0x24;
2592 buf[i++] = n * 8;
2593 append_insns (&buildaddr, i, buf);
2594 current_insn_ptr = buildaddr;
2595 }
2596
2597 /* FN's prototype is `LONGEST(*fn)(int)'. */
2598
2599 static void
2600 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2601 {
2602 unsigned char buf[16];
2603 int i;
2604 CORE_ADDR buildaddr;
2605
2606 EMIT_ASM32 (i386_int_call_1_a,
2607 /* Reserve a bit of stack space. */
2608 "sub $0x8,%esp");
2609 /* Put the one argument on the stack. */
2610 buildaddr = current_insn_ptr;
2611 i = 0;
2612 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2613 buf[i++] = 0x04;
2614 buf[i++] = 0x24;
2615 memcpy (&buf[i], &arg1, sizeof (arg1));
2616 i += 4;
2617 append_insns (&buildaddr, i, buf);
2618 current_insn_ptr = buildaddr;
2619 i386_emit_call (fn);
2620 EMIT_ASM32 (i386_int_call_1_c,
2621 "mov %edx,%ebx\n\t"
2622 "lea 0x8(%esp),%esp");
2623 }
2624
2625 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2626
2627 static void
2628 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2629 {
2630 unsigned char buf[16];
2631 int i;
2632 CORE_ADDR buildaddr;
2633
2634 EMIT_ASM32 (i386_void_call_2_a,
2635 /* Preserve %eax only; we don't have to worry about %ebx. */
2636 "push %eax\n\t"
2637 /* Reserve a bit of stack space for arguments. */
2638 "sub $0x10,%esp\n\t"
2639 /* Copy "top" to the second argument position. (Note that
2640 we can't assume the function won't scribble on its
2641 arguments, so don't try to restore from this.) */
2642 "mov %eax,4(%esp)\n\t"
2643 "mov %ebx,8(%esp)");
2644 /* Put the first argument on the stack. */
2645 buildaddr = current_insn_ptr;
2646 i = 0;
2647 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2648 buf[i++] = 0x04;
2649 buf[i++] = 0x24;
2650 memcpy (&buf[i], &arg1, sizeof (arg1));
2651 i += 4;
2652 append_insns (&buildaddr, i, buf);
2653 current_insn_ptr = buildaddr;
2654 i386_emit_call (fn);
2655 EMIT_ASM32 (i386_void_call_2_b,
2656 "lea 0x10(%esp),%esp\n\t"
2657 /* Restore original stack top. */
2658 "pop %eax");
2659 }
2660
2661
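/* Conditional-goto emitters for i386.  These mirror the amd64 versions
   above, except that each comparison must check both 32-bit halves.  */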
2662 static void
2663 i386_emit_eq_goto (int *offset_p, int *size_p)
2664 {
2665 EMIT_ASM32 (eq,
2666 /* Check the low half first; it's more likely to be the decider */
2667 "cmpl %eax,(%esp)\n\t"
2668 "jne .Leq_fallthru\n\t"
2669 "cmpl %ebx,4(%esp)\n\t"
2670 "jne .Leq_fallthru\n\t"
2671 "lea 0x8(%esp),%esp\n\t"
2672 "pop %eax\n\t"
2673 "pop %ebx\n\t"
2674 /* jmp, but don't trust the assembler to choose the right jump */
2675 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2676 ".Leq_fallthru:\n\t"
2677 "lea 0x8(%esp),%esp\n\t"
2678 "pop %eax\n\t"
2679 "pop %ebx");
2680
2681 if (offset_p)
2682 *offset_p = 18;
2683 if (size_p)
2684 *size_p = 4;
2685 }
2686
2687 static void
2688 i386_emit_ne_goto (int *offset_p, int *size_p)
2689 {
2690 EMIT_ASM32 (ne,
2691 /* Check the low half first; it's more likely to be the decider */
2692 "cmpl %eax,(%esp)\n\t"
2693 "jne .Lne_jump\n\t"
2694 "cmpl %ebx,4(%esp)\n\t"
2695 "je .Lne_fallthru\n\t"
2696 ".Lne_jump:\n\t"
2697 "lea 0x8(%esp),%esp\n\t"
2698 "pop %eax\n\t"
2699 "pop %ebx\n\t"
2700 /* jmp, but don't trust the assembler to choose the right jump */
2701 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2702 ".Lne_fallthru:\n\t"
2703 "lea 0x8(%esp),%esp\n\t"
2704 "pop %eax\n\t"
2705 "pop %ebx");
2706
2707 if (offset_p)
2708 *offset_p = 18;
2709 if (size_p)
2710 *size_p = 4;
2711 }
2712
2713 static void
2714 i386_emit_lt_goto (int *offset_p, int *size_p)
2715 {
2716 EMIT_ASM32 (lt,
2717 "cmpl %ebx,4(%esp)\n\t"
2718 "jl .Llt_jump\n\t"
2719 "jne .Llt_fallthru\n\t"
2720 "cmpl %eax,(%esp)\n\t"
2721 "jnl .Llt_fallthru\n\t"
2722 ".Llt_jump:\n\t"
2723 "lea 0x8(%esp),%esp\n\t"
2724 "pop %eax\n\t"
2725 "pop %ebx\n\t"
2726 /* jmp, but don't trust the assembler to choose the right jump */
2727 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2728 ".Llt_fallthru:\n\t"
2729 "lea 0x8(%esp),%esp\n\t"
2730 "pop %eax\n\t"
2731 "pop %ebx");
2732
2733 if (offset_p)
2734 *offset_p = 20;
2735 if (size_p)
2736 *size_p = 4;
2737 }
2738
2739 static void
2740 i386_emit_le_goto (int *offset_p, int *size_p)
2741 {
2742 EMIT_ASM32 (le,
2743 "cmpl %ebx,4(%esp)\n\t"
2744 "jle .Lle_jump\n\t"
2745 "jne .Lle_fallthru\n\t"
2746 "cmpl %eax,(%esp)\n\t"
2747 "jnle .Lle_fallthru\n\t"
2748 ".Lle_jump:\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2750 "pop %eax\n\t"
2751 "pop %ebx\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Lle_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2756 "pop %eax\n\t"
2757 "pop %ebx");
2758
2759 if (offset_p)
2760 *offset_p = 20;
2761 if (size_p)
2762 *size_p = 4;
2763 }
2764
2765 static void
2766 i386_emit_gt_goto (int *offset_p, int *size_p)
2767 {
2768 EMIT_ASM32 (gt,
2769 "cmpl %ebx,4(%esp)\n\t"
2770 "jg .Lgt_jump\n\t"
2771 "jne .Lgt_fallthru\n\t"
2772 "cmpl %eax,(%esp)\n\t"
2773 "jng .Lgt_fallthru\n\t"
2774 ".Lgt_jump:\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2776 "pop %eax\n\t"
2777 "pop %ebx\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lgt_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2782 "pop %eax\n\t"
2783 "pop %ebx");
2784
2785 if (offset_p)
2786 *offset_p = 20;
2787 if (size_p)
2788 *size_p = 4;
2789 }
2790
2791 static void
2792 i386_emit_ge_goto (int *offset_p, int *size_p)
2793 {
2794 EMIT_ASM32 (ge,
2795 "cmpl %ebx,4(%esp)\n\t"
2796 "jge .Lge_jump\n\t"
2797 "jne .Lge_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnge .Lge_fallthru\n\t"
2800 ".Lge_jump:\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2802 "pop %eax\n\t"
2803 "pop %ebx\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Lge_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2808 "pop %eax\n\t"
2809 "pop %ebx");
2810
2811 if (offset_p)
2812 *offset_p = 20;
2813 if (size_p)
2814 *size_p = 4;
2815 }
2816
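/* The "emit_ops" vector used to compile agent expressions into native
   i386 code.  */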
2817 struct emit_ops i386_emit_ops =
2818 {
2819 i386_emit_prologue,
2820 i386_emit_epilogue,
2821 i386_emit_add,
2822 i386_emit_sub,
2823 i386_emit_mul,
2824 i386_emit_lsh,
2825 i386_emit_rsh_signed,
2826 i386_emit_rsh_unsigned,
2827 i386_emit_ext,
2828 i386_emit_log_not,
2829 i386_emit_bit_and,
2830 i386_emit_bit_or,
2831 i386_emit_bit_xor,
2832 i386_emit_bit_not,
2833 i386_emit_equal,
2834 i386_emit_less_signed,
2835 i386_emit_less_unsigned,
2836 i386_emit_ref,
2837 i386_emit_if_goto,
2838 i386_emit_goto,
2839 i386_write_goto_address,
2840 i386_emit_const,
2841 i386_emit_call,
2842 i386_emit_reg,
2843 i386_emit_pop,
2844 i386_emit_stack_flush,
2845 i386_emit_zero_ext,
2846 i386_emit_swap,
2847 i386_emit_stack_adjust,
2848 i386_emit_int_call_1,
2849 i386_emit_void_call_2,
2850 i386_emit_eq_goto,
2851 i386_emit_ne_goto,
2852 i386_emit_lt_goto,
2853 i386_emit_le_goto,
2854 i386_emit_gt_goto,
2855 i386_emit_ge_goto
2856 };
2857
2858
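/* Return the emit_ops vector matching the inferior: amd64 when the
   current target description is 64-bit, i386 otherwise.  */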
2859 static struct emit_ops *
2860 x86_emit_ops (void)
2861 {
2862 #ifdef __x86_64__
2863 if (is_64bit_tdesc ())
2864 return &amd64_emit_ops;
2865 else
2866 #endif
2867 return &i386_emit_ops;
2868 }
2869
2870 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2871
2872 const gdb_byte *
2873 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2874 {
2875 *size = x86_breakpoint_len;
2876 return x86_breakpoint;
2877 }
2878
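/* Implementation of linux_target_ops method "supports_range_stepping".  */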
2879 static int
2880 x86_supports_range_stepping (void)
2881 {
2882 return 1;
2883 }
2884
2885 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2886 */
2887
2888 static int
2889 x86_supports_hardware_single_step (void)
2890 {
2891 return 1;
2892 }
2893
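/* Return the index of the in-process agent's target description that
   matches the current inferior's register set.  */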
2894 static int
2895 x86_get_ipa_tdesc_idx (void)
2896 {
2897 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2898 const struct target_desc *tdesc = regcache->tdesc;
2899
2900 #ifdef __x86_64__
2901 return amd64_get_ipa_tdesc_idx (tdesc);
2902 #endif
2903
2904 if (tdesc == tdesc_i386_linux_no_xml)
2905 return X86_TDESC_SSE;
2906
2907 return i386_get_ipa_tdesc_idx (tdesc);
2908 }
2909
2910 /* This is initialized assuming an amd64 target.
2911 'low_arch_setup' will correct it for i386 or amd64 targets. */
2912
2913 struct linux_target_ops the_low_target =
2914 {
2915 x86_insert_point,
2916 x86_remove_point,
2917 x86_stopped_by_watchpoint,
2918 x86_stopped_data_address,
2919 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2920 native i386 case (no registers smaller than an xfer unit), and are not
2921 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2922 NULL,
2923 NULL,
2924 /* Need to fix up i386 siginfo if the host is amd64. */
2925 x86_siginfo_fixup,
2926 x86_linux_new_process,
2927 x86_linux_delete_process,
2928 x86_linux_new_thread,
2929 x86_linux_delete_thread,
2930 x86_linux_new_fork,
2931 x86_linux_prepare_to_resume,
2932 x86_linux_process_qsupported,
2933 x86_supports_tracepoints,
2934 x86_get_thread_area,
2935 x86_install_fast_tracepoint_jump_pad,
2936 x86_emit_ops,
2937 x86_get_min_fast_tracepoint_insn_len,
2938 x86_supports_range_stepping,
2939 x86_supports_hardware_single_step,
2940 x86_get_syscall_trapinfo,
2941 x86_get_ipa_tdesc_idx,
2942 };
2943
2944 /* The linux target ops object. */
2945
2946 linux_process_target *the_linux_target = &the_x86_target;
2947
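/* Build the fall-back (no-XML) target descriptions and initialize the
   register-set info.  Called once at gdbserver start-up.  */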
2948 void
2949 initialize_low_arch (void)
2950 {
2951 /* Initialize the Linux target descriptions. */
2952 #ifdef __x86_64__
2953 tdesc_amd64_linux_no_xml = allocate_target_description ();
2954 copy_target_description (tdesc_amd64_linux_no_xml,
2955 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2956 false));
2957 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2958 #endif
2959
2960 tdesc_i386_linux_no_xml = allocate_target_description ();
2961 copy_target_description (tdesc_i386_linux_no_xml,
2962 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2963 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2964
2965 initialize_regsets_info (&x86_regsets_info);
2966 }