gdbserver/linux-low: turn 'get_pc' and 'set_pc' into methods
gdbserver/linux-x86-low.cc (deliverable/binutils-gdb.git)
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
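/* Fallback target descriptions, reported to a GDB that does not
support XML target descriptions; see the xmltarget_*_no_xml
strings below and x86_linux_read_description. */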
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
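/* Templates for the jump instructions used to wire in fast tracepoint
jump pads: a 5-byte "jmp rel32" and a 4-byte "jmp rel16" (0x66
operand-size prefix + 0xe9). The zeroed displacement bytes are
patched in when the jump is installed. */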
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 /* Update the target description of all processes; a new GDB has
104 connected, and it may or may not support xml target descriptions. */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 protected:
110
111 void low_arch_setup () override;
112
113 bool low_cannot_fetch_register (int regno) override;
114
115 bool low_cannot_store_register (int regno) override;
116
117 bool low_supports_breakpoints () override;
118
119 CORE_ADDR low_get_pc (regcache *regcache) override;
120
121 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
122 };
123
124 /* The singleton target ops object. */
125
126 static x86_target the_x86_target;
127
128 /* Per-process arch-specific data we want to keep. */
129
130 struct arch_process_info
131 {
132 struct x86_debug_reg_state debug_reg_state;
133 };
134
135 #ifdef __x86_64__
136
137 /* Mapping between the general-purpose registers in `struct user'
138 format and GDB's register array layout.
139 Note that the transfer layout uses 64-bit regs. */
140 static /*const*/ int i386_regmap[] =
141 {
142 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
143 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
144 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
145 DS * 8, ES * 8, FS * 8, GS * 8
146 };
147
148 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
149
150 /* So the code below doesn't have to care whether it's i386 or amd64. */
151 #define ORIG_EAX ORIG_RAX
152 #define REGSIZE 8
153
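/* Likewise for the amd64 register set. Entries of -1 mark registers
that are not part of the PTRACE_GETREGS layout; they are handled
elsewhere (e.g. via the xstate regset). */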
154 static const int x86_64_regmap[] =
155 {
156 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
157 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
158 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
159 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
160 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
161 DS * 8, ES * 8, FS * 8, GS * 8,
162 -1, -1, -1, -1, -1, -1, -1, -1,
163 -1, -1, -1, -1, -1, -1, -1, -1,
164 -1, -1, -1, -1, -1, -1, -1, -1,
165 -1,
166 -1, -1, -1, -1, -1, -1, -1, -1,
167 ORIG_RAX * 8,
168 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
169 21 * 8, 22 * 8,
170 #else
171 -1, -1,
172 #endif
173 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
174 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
175 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
176 -1, -1, -1, -1, -1, -1, -1, -1,
177 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
180 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
181 -1, -1, -1, -1, -1, -1, -1, -1,
182 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1 /* pkru */
185 };
186
187 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
188 #define X86_64_USER_REGS (GS + 1)
189
190 #else /* ! __x86_64__ */
191
192 /* Mapping between the general-purpose registers in `struct user'
193 format and GDB's register array layout. */
194 static /*const*/ int i386_regmap[] =
195 {
196 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
197 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
198 EIP * 4, EFL * 4, CS * 4, SS * 4,
199 DS * 4, ES * 4, FS * 4, GS * 4
200 };
201
202 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
203
204 #define REGSIZE 4
205
206 #endif
207
208 #ifdef __x86_64__
209
210 /* Returns true if the current inferior belongs to an x86-64 process,
211 per the tdesc. */
212
213 static int
214 is_64bit_tdesc (void)
215 {
216 struct regcache *regcache = get_thread_regcache (current_thread, 0);
217
218 return register_size (regcache->tdesc, 0) == 8;
219 }
220
221 #endif
222
223 \f
224 /* Called by libthread_db. */
225
226 ps_err_e
227 ps_get_thread_area (struct ps_prochandle *ph,
228 lwpid_t lwpid, int idx, void **base)
229 {
230 #ifdef __x86_64__
231 int use_64bit = is_64bit_tdesc ();
232
233 if (use_64bit)
234 {
235 switch (idx)
236 {
237 case FS:
238 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
239 return PS_OK;
240 break;
241 case GS:
242 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
243 return PS_OK;
244 break;
245 default:
246 return PS_BADADDR;
247 }
248 return PS_ERR;
249 }
250 #endif
251
252 {
253 unsigned int desc[4];
254
255 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
256 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
257 return PS_ERR;
258
259 /* Ensure we properly extend the value to 64-bits for x86_64. */
260 *base = (void *) (uintptr_t) desc[1];
261 return PS_OK;
262 }
263 }
264
265 /* Get the thread area address. This is used to recognize which
266 thread is which when tracing with the in-process agent library. We
267 don't read anything from the address, and treat it as opaque; it's
268 the address itself that we assume is unique per-thread. */
269
270 static int
271 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
272 {
273 #ifdef __x86_64__
274 int use_64bit = is_64bit_tdesc ();
275
276 if (use_64bit)
277 {
278 void *base;
279 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
280 {
281 *addr = (CORE_ADDR) (uintptr_t) base;
282 return 0;
283 }
284
285 return -1;
286 }
287 #endif
288
289 {
290 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
291 struct thread_info *thr = get_lwp_thread (lwp);
292 struct regcache *regcache = get_thread_regcache (thr, 1);
293 unsigned int desc[4];
294 ULONGEST gs = 0;
295 const int reg_thread_area = 3; /* bits to scale down register value. */
296 int idx;
297
298 collect_register_by_name (regcache, "gs", &gs);
299
300 idx = gs >> reg_thread_area;
301
302 if (ptrace (PTRACE_GET_THREAD_AREA,
303 lwpid_of (thr),
304 (void *) (long) idx, (unsigned long) &desc) < 0)
305 return -1;
306
307 *addr = desc[1];
308 return 0;
309 }
310 }
311
312
313 \f
314 bool
315 x86_target::low_cannot_store_register (int regno)
316 {
317 #ifdef __x86_64__
318 if (is_64bit_tdesc ())
319 return false;
320 #endif
321
322 return regno >= I386_NUM_REGS;
323 }
324
325 bool
326 x86_target::low_cannot_fetch_register (int regno)
327 {
328 #ifdef __x86_64__
329 if (is_64bit_tdesc ())
330 return false;
331 #endif
332
333 return regno >= I386_NUM_REGS;
334 }
335
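/* Collect the general-purpose registers from REGCACHE into the ptrace
GETREGS buffer BUF, handling both the 64-bit and the 32-bit register
layouts. */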
336 static void
337 x86_fill_gregset (struct regcache *regcache, void *buf)
338 {
339 int i;
340
341 #ifdef __x86_64__
342 if (register_size (regcache->tdesc, 0) == 8)
343 {
344 for (i = 0; i < X86_64_NUM_REGS; i++)
345 if (x86_64_regmap[i] != -1)
346 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
347
348 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
349 {
350 unsigned long base;
351 int lwpid = lwpid_of (current_thread);
352
353 collect_register_by_name (regcache, "fs_base", &base);
354 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
355
356 collect_register_by_name (regcache, "gs_base", &base);
357 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
358 }
359 #endif
360
361 return;
362 }
363
364 /* 32-bit inferior registers need to be zero-extended.
365 Callers would read uninitialized memory otherwise. */
366 memset (buf, 0x00, X86_64_USER_REGS * 8);
367 #endif
368
369 for (i = 0; i < I386_NUM_REGS; i++)
370 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
371
372 collect_register_by_name (regcache, "orig_eax",
373 ((char *) buf) + ORIG_EAX * REGSIZE);
374
375 #ifdef __x86_64__
376 /* Sign extend EAX value to avoid potential syscall restart
377 problems.
378
379 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
380 for a detailed explanation. */
381 if (register_size (regcache->tdesc, 0) == 4)
382 {
383 void *ptr = ((gdb_byte *) buf
384 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
385
386 *(int64_t *) ptr = *(int32_t *) ptr;
387 }
388 #endif
389 }
390
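/* Supply the general-purpose registers from the ptrace GETREGS buffer
BUF to REGCACHE. */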
391 static void
392 x86_store_gregset (struct regcache *regcache, const void *buf)
393 {
394 int i;
395
396 #ifdef __x86_64__
397 if (register_size (regcache->tdesc, 0) == 8)
398 {
399 for (i = 0; i < X86_64_NUM_REGS; i++)
400 if (x86_64_regmap[i] != -1)
401 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
402
403 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
404 {
405 unsigned long base;
406 int lwpid = lwpid_of (current_thread);
407
408 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
409 supply_register_by_name (regcache, "fs_base", &base);
410
411 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
412 supply_register_by_name (regcache, "gs_base", &base);
413 }
414 #endif
415 return;
416 }
417 #endif
418
419 for (i = 0; i < I386_NUM_REGS; i++)
420 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
421
422 supply_register_by_name (regcache, "orig_eax",
423 ((char *) buf) + ORIG_EAX * REGSIZE);
424 }
425
426 static void
427 x86_fill_fpregset (struct regcache *regcache, void *buf)
428 {
429 #ifdef __x86_64__
430 i387_cache_to_fxsave (regcache, buf);
431 #else
432 i387_cache_to_fsave (regcache, buf);
433 #endif
434 }
435
436 static void
437 x86_store_fpregset (struct regcache *regcache, const void *buf)
438 {
439 #ifdef __x86_64__
440 i387_fxsave_to_cache (regcache, buf);
441 #else
442 i387_fsave_to_cache (regcache, buf);
443 #endif
444 }
445
446 #ifndef __x86_64__
447
448 static void
449 x86_fill_fpxregset (struct regcache *regcache, void *buf)
450 {
451 i387_cache_to_fxsave (regcache, buf);
452 }
453
454 static void
455 x86_store_fpxregset (struct regcache *regcache, const void *buf)
456 {
457 i387_fxsave_to_cache (regcache, buf);
458 }
459
460 #endif
461
462 static void
463 x86_fill_xstateregset (struct regcache *regcache, void *buf)
464 {
465 i387_cache_to_xsave (regcache, buf);
466 }
467
468 static void
469 x86_store_xstateregset (struct regcache *regcache, const void *buf)
470 {
471 i387_xsave_to_cache (regcache, buf);
472 }
473
474 /* ??? The non-biarch i386 case stores all the i387 regs twice.
475 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
476 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
477 doesn't work. IWBN to avoid the duplication in the case where it
478 does work. Maybe the arch_setup routine could check whether it works
479 and update the supported regsets accordingly. */
480
481 static struct regset_info x86_regsets[] =
482 {
483 #ifdef HAVE_PTRACE_GETREGS
484 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
485 GENERAL_REGS,
486 x86_fill_gregset, x86_store_gregset },
487 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
488 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
489 # ifndef __x86_64__
490 # ifdef HAVE_PTRACE_GETFPXREGS
491 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
492 EXTENDED_REGS,
493 x86_fill_fpxregset, x86_store_fpxregset },
494 # endif
495 # endif
496 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
497 FP_REGS,
498 x86_fill_fpregset, x86_store_fpregset },
499 #endif /* HAVE_PTRACE_GETREGS */
500 NULL_REGSET
501 };
502
503 bool
504 x86_target::low_supports_breakpoints ()
505 {
506 return true;
507 }
508
509 CORE_ADDR
510 x86_target::low_get_pc (regcache *regcache)
511 {
512 int use_64bit = register_size (regcache->tdesc, 0) == 8;
513
514 if (use_64bit)
515 {
516 uint64_t pc;
517
518 collect_register_by_name (regcache, "rip", &pc);
519 return (CORE_ADDR) pc;
520 }
521 else
522 {
523 uint32_t pc;
524
525 collect_register_by_name (regcache, "eip", &pc);
526 return (CORE_ADDR) pc;
527 }
528 }
529
530 void
531 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
532 {
533 int use_64bit = register_size (regcache->tdesc, 0) == 8;
534
535 if (use_64bit)
536 {
537 uint64_t newpc = pc;
538
539 supply_register_by_name (regcache, "rip", &newpc);
540 }
541 else
542 {
543 uint32_t newpc = pc;
544
545 supply_register_by_name (regcache, "eip", &newpc);
546 }
547 }
548 \f
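/* The x86 software breakpoint instruction: a single "int3" (0xCC) byte. */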
549 static const gdb_byte x86_breakpoint[] = { 0xCC };
550 #define x86_breakpoint_len 1
551
552 static int
553 x86_breakpoint_at (CORE_ADDR pc)
554 {
555 unsigned char c;
556
557 the_target->read_memory (pc, &c, 1);
558 if (c == 0xCC)
559 return 1;
560
561 return 0;
562 }
563 \f
564 /* Low-level function vector. */
565 struct x86_dr_low_type x86_dr_low =
566 {
567 x86_linux_dr_set_control,
568 x86_linux_dr_set_addr,
569 x86_linux_dr_get_addr,
570 x86_linux_dr_get_status,
571 x86_linux_dr_get_control,
572 sizeof (void *),
573 };
574 \f
575 /* Breakpoint/Watchpoint support. */
576
577 static int
578 x86_supports_z_point_type (char z_type)
579 {
580 switch (z_type)
581 {
582 case Z_PACKET_SW_BP:
583 case Z_PACKET_HW_BP:
584 case Z_PACKET_WRITE_WP:
585 case Z_PACKET_ACCESS_WP:
586 return 1;
587 default:
588 return 0;
589 }
590 }
591
592 static int
593 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
594 int size, struct raw_breakpoint *bp)
595 {
596 struct process_info *proc = current_process ();
597
598 switch (type)
599 {
600 case raw_bkpt_type_hw:
601 case raw_bkpt_type_write_wp:
602 case raw_bkpt_type_access_wp:
603 {
604 enum target_hw_bp_type hw_type
605 = raw_bkpt_type_to_target_hw_bp_type (type);
606 struct x86_debug_reg_state *state
607 = &proc->priv->arch_private->debug_reg_state;
608
609 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
610 }
611
612 default:
613 /* Unsupported. */
614 return 1;
615 }
616 }
617
618 static int
619 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
620 int size, struct raw_breakpoint *bp)
621 {
622 struct process_info *proc = current_process ();
623
624 switch (type)
625 {
626 case raw_bkpt_type_hw:
627 case raw_bkpt_type_write_wp:
628 case raw_bkpt_type_access_wp:
629 {
630 enum target_hw_bp_type hw_type
631 = raw_bkpt_type_to_target_hw_bp_type (type);
632 struct x86_debug_reg_state *state
633 = &proc->priv->arch_private->debug_reg_state;
634
635 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
636 }
637 default:
638 /* Unsupported. */
639 return 1;
640 }
641 }
642
643 static int
644 x86_stopped_by_watchpoint (void)
645 {
646 struct process_info *proc = current_process ();
647 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
648 }
649
650 static CORE_ADDR
651 x86_stopped_data_address (void)
652 {
653 struct process_info *proc = current_process ();
654 CORE_ADDR addr;
655 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
656 &addr))
657 return addr;
658 return 0;
659 }
660 \f
661 /* Called when a new process is created. */
662
663 static struct arch_process_info *
664 x86_linux_new_process (void)
665 {
666 struct arch_process_info *info = XCNEW (struct arch_process_info);
667
668 x86_low_init_dregs (&info->debug_reg_state);
669
670 return info;
671 }
672
673 /* Called when a process is being deleted. */
674
675 static void
676 x86_linux_delete_process (struct arch_process_info *info)
677 {
678 xfree (info);
679 }
680
681 /* Target routine for linux_new_fork. */
682
683 static void
684 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
685 {
686 /* These are allocated by linux_add_process. */
687 gdb_assert (parent->priv != NULL
688 && parent->priv->arch_private != NULL);
689 gdb_assert (child->priv != NULL
690 && child->priv->arch_private != NULL);
691
692 /* Linux kernels before the 2.6.33 commit
693 72f674d203cd230426437cdcf7dd6f681dad8b0d
694 let the child inherit the hardware debug registers from the parent
695 on fork/vfork/clone. Newer Linux kernels create such tasks with
696 zeroed debug registers.
697
698 The GDB core assumes the child inherits the watchpoints/hw
699 breakpoints of the parent, and will remove them all from the
700 forked-off process. Copy the debug register mirrors into the
701 new process so that all breakpoints and watchpoints can be
702 removed together. The debug register mirrors end up zeroed
703 before the forked-off process is detached, thus making this
704 compatible with older Linux kernels too. */
705
706 *child->priv->arch_private = *parent->priv->arch_private;
707 }
708
709 /* See nat/x86-dregs.h. */
710
711 struct x86_debug_reg_state *
712 x86_debug_reg_state (pid_t pid)
713 {
714 struct process_info *proc = find_process_pid (pid);
715
716 return &proc->priv->arch_private->debug_reg_state;
717 }
718 \f
719 /* When GDBSERVER is built as a 64-bit application on linux, the
720 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
721 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
722 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
723 conversion in-place ourselves. */
724
725 /* Convert a ptrace/host siginfo object into/from the siginfo in the
726 layout of the inferior's architecture. Returns true if any
727 conversion was done; false otherwise. If DIRECTION is 1, then copy
728 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
729 INF. */
730
731 static int
732 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
733 {
734 #ifdef __x86_64__
735 unsigned int machine;
736 int tid = lwpid_of (current_thread);
737 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
738
739 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
740 if (!is_64bit_tdesc ())
741 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
742 FIXUP_32);
743 /* No fixup for native x32 GDB. */
744 else if (!is_elf64 && sizeof (void *) == 8)
745 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
746 FIXUP_X32);
747 #endif
748
749 return 0;
750 }
751 \f
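/* Nonzero if the connected GDB announced support for x86 XML target
descriptions via qSupported's "xmlRegisters=i386"; see
x86_linux_process_qsupported below. */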
752 static int use_xml;
753
754 /* Format of XSAVE extended state is:
755 struct
756 {
757 fxsave_bytes[0..463]
758 sw_usable_bytes[464..511]
759 xstate_hdr_bytes[512..575]
760 avx_bytes[576..831]
761 future_state etc
762 };
763
764 Same memory layout will be used for the coredump NT_X86_XSTATE
765 representing the XSAVE extended state registers.
766
767 The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS-enabled
768 extended state mask, which is the same as the extended control register
769 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
770 together with the mask saved in the xstate_hdr_bytes to determine what
771 states the processor/OS supports and what state, used or initialized,
772 the process/thread is in. */
773 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
774
775 /* Does the current host support the GETFPXREGS request? The header
776 file may or may not define it, and even if it is defined, the
777 kernel will return EIO if it's running on a pre-SSE processor. */
778 int have_ptrace_getfpxregs =
779 #ifdef HAVE_PTRACE_GETFPXREGS
780 -1
781 #else
782 0
783 #endif
784 ;
785
786 /* Get Linux/x86 target description from running target. */
787
788 static const struct target_desc *
789 x86_linux_read_description (void)
790 {
791 unsigned int machine;
792 int is_elf64;
793 int xcr0_features;
794 int tid;
795 static uint64_t xcr0;
796 struct regset_info *regset;
797
798 tid = lwpid_of (current_thread);
799
800 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
801
802 if (sizeof (void *) == 4)
803 {
804 if (is_elf64 > 0)
805 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
806 #ifndef __x86_64__
807 else if (machine == EM_X86_64)
808 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
809 #endif
810 }
811
812 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
813 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
814 {
815 elf_fpxregset_t fpxregs;
816
817 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
818 {
819 have_ptrace_getfpxregs = 0;
820 have_ptrace_getregset = 0;
821 return i386_linux_read_description (X86_XSTATE_X87);
822 }
823 else
824 have_ptrace_getfpxregs = 1;
825 }
826 #endif
827
828 if (!use_xml)
829 {
830 x86_xcr0 = X86_XSTATE_SSE_MASK;
831
832 /* Don't use XML. */
833 #ifdef __x86_64__
834 if (machine == EM_X86_64)
835 return tdesc_amd64_linux_no_xml;
836 else
837 #endif
838 return tdesc_i386_linux_no_xml;
839 }
840
841 if (have_ptrace_getregset == -1)
842 {
843 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
844 struct iovec iov;
845
846 iov.iov_base = xstateregs;
847 iov.iov_len = sizeof (xstateregs);
848
849 /* Check if PTRACE_GETREGSET works. */
850 if (ptrace (PTRACE_GETREGSET, tid,
851 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
852 have_ptrace_getregset = 0;
853 else
854 {
855 have_ptrace_getregset = 1;
856
857 /* Get XCR0 from XSAVE extended state. */
858 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
859 / sizeof (uint64_t))];
860
861 /* Use PTRACE_GETREGSET if it is available. */
862 for (regset = x86_regsets;
863 regset->fill_function != NULL; regset++)
864 if (regset->get_request == PTRACE_GETREGSET)
865 regset->size = X86_XSTATE_SIZE (xcr0);
866 else if (regset->type != GENERAL_REGS)
867 regset->size = 0;
868 }
869 }
870
871 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
872 xcr0_features = (have_ptrace_getregset
873 && (xcr0 & X86_XSTATE_ALL_MASK));
874
875 if (xcr0_features)
876 x86_xcr0 = xcr0;
877
878 if (machine == EM_X86_64)
879 {
880 #ifdef __x86_64__
881 const target_desc *tdesc = NULL;
882
883 if (xcr0_features)
884 {
885 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
886 !is_elf64);
887 }
888
889 if (tdesc == NULL)
890 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
891 return tdesc;
892 #endif
893 }
894 else
895 {
896 const target_desc *tdesc = NULL;
897
898 if (xcr0_features)
899 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
900
901 if (tdesc == NULL)
902 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
903
904 return tdesc;
905 }
906
907 gdb_assert_not_reached ("failed to return tdesc");
908 }
909
910 /* Update the target description of all processes; a new GDB has
911 connected, and it may or may not support xml target descriptions. */
912
913 void
914 x86_target::update_xmltarget ()
915 {
916 struct thread_info *saved_thread = current_thread;
917
918 /* Before changing the register cache's internal layout, flush the
919 contents of the current valid caches back to the threads, and
920 release the current regcache objects. */
921 regcache_release ();
922
923 for_each_process ([this] (process_info *proc) {
924 int pid = proc->pid;
925
926 /* Look up any thread of this process. */
927 current_thread = find_any_thread_of_pid (pid);
928
929 low_arch_setup ();
930 });
931
932 current_thread = saved_thread;
933 }
934
935 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
936 PTRACE_GETREGSET. */
937
938 static void
939 x86_linux_process_qsupported (char **features, int count)
940 {
941 int i;
942
943 /* Assume gdb doesn't support XML unless told otherwise. If gdb sends
944 "xmlRegisters=" with "i386" in its qSupported query, it supports x86
945 XML target descriptions. */
946 use_xml = 0;
947 for (i = 0; i < count; i++)
948 {
949 const char *feature = features[i];
950
951 if (startswith (feature, "xmlRegisters="))
952 {
953 char *copy = xstrdup (feature + 13);
954
955 char *saveptr;
956 for (char *p = strtok_r (copy, ",", &saveptr);
957 p != NULL;
958 p = strtok_r (NULL, ",", &saveptr))
959 {
960 if (strcmp (p, "i386") == 0)
961 {
962 use_xml = 1;
963 break;
964 }
965 }
966
967 free (copy);
968 }
969 }
970 the_x86_target.update_xmltarget ();
971 }
972
973 /* Common for x86/x86-64. */
974
975 static struct regsets_info x86_regsets_info =
976 {
977 x86_regsets, /* regsets */
978 0, /* num_regsets */
979 NULL, /* disabled_regsets */
980 };
981
982 #ifdef __x86_64__
983 static struct regs_info amd64_linux_regs_info =
984 {
985 NULL, /* regset_bitmap */
986 NULL, /* usrregs_info */
987 &x86_regsets_info
988 };
989 #endif
990 static struct usrregs_info i386_linux_usrregs_info =
991 {
992 I386_NUM_REGS,
993 i386_regmap,
994 };
995
996 static struct regs_info i386_linux_regs_info =
997 {
998 NULL, /* regset_bitmap */
999 &i386_linux_usrregs_info,
1000 &x86_regsets_info
1001 };
1002
1003 const regs_info *
1004 x86_target::get_regs_info ()
1005 {
1006 #ifdef __x86_64__
1007 if (is_64bit_tdesc ())
1008 return &amd64_linux_regs_info;
1009 else
1010 #endif
1011 return &i386_linux_regs_info;
1012 }
1013
1014 /* Initialize the target description for the architecture of the
1015 inferior. */
1016
1017 void
1018 x86_target::low_arch_setup ()
1019 {
1020 current_process ()->tdesc = x86_linux_read_description ();
1021 }
1022
1023 /* Fill *SYSNO with the number of the syscall that was trapped. This
1024 should only be called if LWP got a SYSCALL_SIGTRAP. */
1025
1026 static void
1027 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1028 {
1029 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1030
1031 if (use_64bit)
1032 {
1033 long l_sysno;
1034
1035 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1036 *sysno = (int) l_sysno;
1037 }
1038 else
1039 collect_register_by_name (regcache, "orig_eax", sysno);
1040 }
1041
1042 static int
1043 x86_supports_tracepoints (void)
1044 {
1045 return 1;
1046 }
1047
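/* Write the LEN bytes in BUF to the inferior at *TO, then advance *TO
past the bytes just written. */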
1048 static void
1049 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1050 {
1051 target_write_memory (*to, buf, len);
1052 *to += len;
1053 }
1054
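/* Decode the sequence of hex bytes in the string OP (e.g. "48 89 e6")
into BUF. Returns the number of bytes written. */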
1055 static int
1056 push_opcode (unsigned char *buf, const char *op)
1057 {
1058 unsigned char *buf_org = buf;
1059
1060 while (1)
1061 {
1062 char *endptr;
1063 unsigned long ul = strtoul (op, &endptr, 16);
1064
1065 if (endptr == op)
1066 break;
1067
1068 *buf++ = ul;
1069 op = endptr;
1070 }
1071
1072 return buf - buf_org;
1073 }
1074
1075 #ifdef __x86_64__
1076
1077 /* Build a jump pad that saves registers and calls a collection
1078 function. Writes into JJUMPAD_INSN the jump instruction that jumps
1079 to the jump pad; the caller is responsible for writing it in at the
1080 tracepoint address. */
1081
1082 static int
1083 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1084 CORE_ADDR collector,
1085 CORE_ADDR lockaddr,
1086 ULONGEST orig_size,
1087 CORE_ADDR *jump_entry,
1088 CORE_ADDR *trampoline,
1089 ULONGEST *trampoline_size,
1090 unsigned char *jjump_pad_insn,
1091 ULONGEST *jjump_pad_insn_size,
1092 CORE_ADDR *adjusted_insn_addr,
1093 CORE_ADDR *adjusted_insn_addr_end,
1094 char *err)
1095 {
1096 unsigned char buf[40];
1097 int i, offset;
1098 int64_t loffset;
1099
1100 CORE_ADDR buildaddr = *jump_entry;
1101
1102 /* Build the jump pad. */
1103
1104 /* First, do tracepoint data collection. Save registers. */
1105 i = 0;
1106 /* Need to ensure stack pointer saved first. */
1107 buf[i++] = 0x54; /* push %rsp */
1108 buf[i++] = 0x55; /* push %rbp */
1109 buf[i++] = 0x57; /* push %rdi */
1110 buf[i++] = 0x56; /* push %rsi */
1111 buf[i++] = 0x52; /* push %rdx */
1112 buf[i++] = 0x51; /* push %rcx */
1113 buf[i++] = 0x53; /* push %rbx */
1114 buf[i++] = 0x50; /* push %rax */
1115 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1116 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1117 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1118 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1119 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1120 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1121 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1122 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1123 buf[i++] = 0x9c; /* pushfq */
1124 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1125 buf[i++] = 0xbf;
1126 memcpy (buf + i, &tpaddr, 8);
1127 i += 8;
1128 buf[i++] = 0x57; /* push %rdi */
1129 append_insns (&buildaddr, i, buf);
1130
1131 /* Stack space for the collecting_t object. */
1132 i = 0;
1133 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1134 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1135 memcpy (buf + i, &tpoint, 8);
1136 i += 8;
1137 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1138 i += push_opcode (&buf[i],
1139 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1140 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1141 append_insns (&buildaddr, i, buf);
1142
1143 /* spin-lock. */
1144 i = 0;
1145 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1146 memcpy (&buf[i], (void *) &lockaddr, 8);
1147 i += 8;
1148 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1149 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1150 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1151 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1152 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1153 append_insns (&buildaddr, i, buf);
1154
1155 /* Set up the gdb_collect call. */
1156 /* At this point, (stack pointer + 0x18) is the base of our saved
1157 register block. */
1158
1159 i = 0;
1160 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1161 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1162
1163 /* tpoint address may be 64-bit wide. */
1164 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1165 memcpy (buf + i, &tpoint, 8);
1166 i += 8;
1167 append_insns (&buildaddr, i, buf);
1168
1169 /* The collector function, being in the shared library, may be
1170 more than 31 bits away from the jump pad. */
1171 i = 0;
1172 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1173 memcpy (buf + i, &collector, 8);
1174 i += 8;
1175 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1176 append_insns (&buildaddr, i, buf);
1177
1178 /* Clear the spin-lock. */
1179 i = 0;
1180 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1181 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1182 memcpy (buf + i, &lockaddr, 8);
1183 i += 8;
1184 append_insns (&buildaddr, i, buf);
1185
1186 /* Remove the stack space that had been used for the collecting_t object. */
1187 i = 0;
1188 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1189 append_insns (&buildaddr, i, buf);
1190
1191 /* Restore register state. */
1192 i = 0;
1193 buf[i++] = 0x48; /* add $0x8,%rsp */
1194 buf[i++] = 0x83;
1195 buf[i++] = 0xc4;
1196 buf[i++] = 0x08;
1197 buf[i++] = 0x9d; /* popfq */
1198 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1199 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1200 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1201 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1202 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1203 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1204 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1205 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1206 buf[i++] = 0x58; /* pop %rax */
1207 buf[i++] = 0x5b; /* pop %rbx */
1208 buf[i++] = 0x59; /* pop %rcx */
1209 buf[i++] = 0x5a; /* pop %rdx */
1210 buf[i++] = 0x5e; /* pop %rsi */
1211 buf[i++] = 0x5f; /* pop %rdi */
1212 buf[i++] = 0x5d; /* pop %rbp */
1213 buf[i++] = 0x5c; /* pop %rsp */
1214 append_insns (&buildaddr, i, buf);
1215
1216 /* Now, adjust the original instruction to execute in the jump
1217 pad. */
1218 *adjusted_insn_addr = buildaddr;
1219 relocate_instruction (&buildaddr, tpaddr);
1220 *adjusted_insn_addr_end = buildaddr;
1221
1222 /* Finally, write a jump back to the program. */
1223
1224 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1225 if (loffset > INT_MAX || loffset < INT_MIN)
1226 {
1227 sprintf (err,
1228 "E.Jump back from jump pad too far from tracepoint "
1229 "(offset 0x%" PRIx64 " > int32).", loffset);
1230 return 1;
1231 }
1232
1233 offset = (int) loffset;
1234 memcpy (buf, jump_insn, sizeof (jump_insn));
1235 memcpy (buf + 1, &offset, 4);
1236 append_insns (&buildaddr, sizeof (jump_insn), buf);
1237
1238 /* The jump pad is now built. Wire in a jump to our jump pad. This
1239 is always done last (by our caller actually), so that we can
1240 install fast tracepoints with threads running. This relies on
1241 the agent's atomic write support. */
1242 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1243 if (loffset > INT_MAX || loffset < INT_MIN)
1244 {
1245 sprintf (err,
1246 "E.Jump pad too far from tracepoint "
1247 "(offset 0x%" PRIx64 " > int32).", loffset);
1248 return 1;
1249 }
1250
1251 offset = (int) loffset;
1252
1253 memcpy (buf, jump_insn, sizeof (jump_insn));
1254 memcpy (buf + 1, &offset, 4);
1255 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1256 *jjump_pad_insn_size = sizeof (jump_insn);
1257
1258 /* Return the end address of our pad. */
1259 *jump_entry = buildaddr;
1260
1261 return 0;
1262 }
1263
1264 #endif /* __x86_64__ */
1265
1266 /* Build a jump pad that saves registers and calls a collection
1267 function. Writes into JJUMPAD_INSN the jump instruction that jumps
1268 to the jump pad; the caller is responsible for writing it in at the
1269 tracepoint address. */
1270
1271 static int
1272 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1273 CORE_ADDR collector,
1274 CORE_ADDR lockaddr,
1275 ULONGEST orig_size,
1276 CORE_ADDR *jump_entry,
1277 CORE_ADDR *trampoline,
1278 ULONGEST *trampoline_size,
1279 unsigned char *jjump_pad_insn,
1280 ULONGEST *jjump_pad_insn_size,
1281 CORE_ADDR *adjusted_insn_addr,
1282 CORE_ADDR *adjusted_insn_addr_end,
1283 char *err)
1284 {
1285 unsigned char buf[0x100];
1286 int i, offset;
1287 CORE_ADDR buildaddr = *jump_entry;
1288
1289 /* Build the jump pad. */
1290
1291 /* First, do tracepoint data collection. Save registers. */
1292 i = 0;
1293 buf[i++] = 0x60; /* pushad */
1294 buf[i++] = 0x68; /* push tpaddr aka $pc */
1295 *((int *)(buf + i)) = (int) tpaddr;
1296 i += 4;
1297 buf[i++] = 0x9c; /* pushf */
1298 buf[i++] = 0x1e; /* push %ds */
1299 buf[i++] = 0x06; /* push %es */
1300 buf[i++] = 0x0f; /* push %fs */
1301 buf[i++] = 0xa0;
1302 buf[i++] = 0x0f; /* push %gs */
1303 buf[i++] = 0xa8;
1304 buf[i++] = 0x16; /* push %ss */
1305 buf[i++] = 0x0e; /* push %cs */
1306 append_insns (&buildaddr, i, buf);
1307
1308 /* Stack space for the collecting_t object. */
1309 i = 0;
1310 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1311
1312 /* Build the object. */
1313 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1314 memcpy (buf + i, &tpoint, 4);
1315 i += 4;
1316 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1317
1318 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1319 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1320 append_insns (&buildaddr, i, buf);
1321
1322 /* spin-lock. Note this uses cmpxchg, which is not available on the
1323 original i386. If we cared about that, this could use xchg instead. */
1324
1325 i = 0;
1326 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1327 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1328 %esp,<lockaddr> */
1329 memcpy (&buf[i], (void *) &lockaddr, 4);
1330 i += 4;
1331 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1332 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1333 append_insns (&buildaddr, i, buf);
1334
1335
1336 /* Set up arguments to the gdb_collect call. */
1337 i = 0;
1338 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1339 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1340 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1341 append_insns (&buildaddr, i, buf);
1342
1343 i = 0;
1344 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1345 append_insns (&buildaddr, i, buf);
1346
1347 i = 0;
1348 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1349 memcpy (&buf[i], (void *) &tpoint, 4);
1350 i += 4;
1351 append_insns (&buildaddr, i, buf);
1352
1353 buf[0] = 0xe8; /* call <reladdr> */
1354 offset = collector - (buildaddr + sizeof (jump_insn));
1355 memcpy (buf + 1, &offset, 4);
1356 append_insns (&buildaddr, 5, buf);
1357 /* Clean up after the call. */
1358 buf[0] = 0x83; /* add $0x8,%esp */
1359 buf[1] = 0xc4;
1360 buf[2] = 0x08;
1361 append_insns (&buildaddr, 3, buf);
1362
1363
1364 /* Clear the spin-lock. This would need the LOCK prefix on older
1365 broken archs. */
1366 i = 0;
1367 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1368 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1369 memcpy (buf + i, &lockaddr, 4);
1370 i += 4;
1371 append_insns (&buildaddr, i, buf);
1372
1373
1374 /* Remove the stack space that had been used for the collecting_t object. */
1375 i = 0;
1376 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1377 append_insns (&buildaddr, i, buf);
1378
1379 i = 0;
1380 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1381 buf[i++] = 0xc4;
1382 buf[i++] = 0x04;
1383 buf[i++] = 0x17; /* pop %ss */
1384 buf[i++] = 0x0f; /* pop %gs */
1385 buf[i++] = 0xa9;
1386 buf[i++] = 0x0f; /* pop %fs */
1387 buf[i++] = 0xa1;
1388 buf[i++] = 0x07; /* pop %es */
1389 buf[i++] = 0x1f; /* pop %ds */
1390 buf[i++] = 0x9d; /* popf */
1391 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1392 buf[i++] = 0xc4;
1393 buf[i++] = 0x04;
1394 buf[i++] = 0x61; /* popad */
1395 append_insns (&buildaddr, i, buf);
1396
1397 /* Now, adjust the original instruction to execute in the jump
1398 pad. */
1399 *adjusted_insn_addr = buildaddr;
1400 relocate_instruction (&buildaddr, tpaddr);
1401 *adjusted_insn_addr_end = buildaddr;
1402
1403 /* Write the jump back to the program. */
1404 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1405 memcpy (buf, jump_insn, sizeof (jump_insn));
1406 memcpy (buf + 1, &offset, 4);
1407 append_insns (&buildaddr, sizeof (jump_insn), buf);
1408
1409 /* The jump pad is now built. Wire in a jump to our jump pad. This
1410 is always done last (by our caller actually), so that we can
1411 install fast tracepoints with threads running. This relies on
1412 the agent's atomic write support. */
1413 if (orig_size == 4)
1414 {
1415 /* Create a trampoline. */
1416 *trampoline_size = sizeof (jump_insn);
1417 if (!claim_trampoline_space (*trampoline_size, trampoline))
1418 {
1419 /* No trampoline space available. */
1420 strcpy (err,
1421 "E.Cannot allocate trampoline space needed for fast "
1422 "tracepoints on 4-byte instructions.");
1423 return 1;
1424 }
1425
1426 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1427 memcpy (buf, jump_insn, sizeof (jump_insn));
1428 memcpy (buf + 1, &offset, 4);
1429 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1430
1431 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1432 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1433 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1434 memcpy (buf + 2, &offset, 2);
1435 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1436 *jjump_pad_insn_size = sizeof (small_jump_insn);
1437 }
1438 else
1439 {
1440 /* Else use a 32-bit relative jump instruction. */
1441 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1442 memcpy (buf, jump_insn, sizeof (jump_insn));
1443 memcpy (buf + 1, &offset, 4);
1444 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1445 *jjump_pad_insn_size = sizeof (jump_insn);
1446 }
1447
1448 /* Return the end address of our pad. */
1449 *jump_entry = buildaddr;
1450
1451 return 0;
1452 }
1453
1454 static int
1455 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1456 CORE_ADDR collector,
1457 CORE_ADDR lockaddr,
1458 ULONGEST orig_size,
1459 CORE_ADDR *jump_entry,
1460 CORE_ADDR *trampoline,
1461 ULONGEST *trampoline_size,
1462 unsigned char *jjump_pad_insn,
1463 ULONGEST *jjump_pad_insn_size,
1464 CORE_ADDR *adjusted_insn_addr,
1465 CORE_ADDR *adjusted_insn_addr_end,
1466 char *err)
1467 {
1468 #ifdef __x86_64__
1469 if (is_64bit_tdesc ())
1470 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1471 collector, lockaddr,
1472 orig_size, jump_entry,
1473 trampoline, trampoline_size,
1474 jjump_pad_insn,
1475 jjump_pad_insn_size,
1476 adjusted_insn_addr,
1477 adjusted_insn_addr_end,
1478 err);
1479 #endif
1480
1481 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1482 collector, lockaddr,
1483 orig_size, jump_entry,
1484 trampoline, trampoline_size,
1485 jjump_pad_insn,
1486 jjump_pad_insn_size,
1487 adjusted_insn_addr,
1488 adjusted_insn_addr_end,
1489 err);
1490 }
1491
1492 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1493 architectures. */
1494
1495 static int
1496 x86_get_min_fast_tracepoint_insn_len (void)
1497 {
1498 static int warned_about_fast_tracepoints = 0;
1499
1500 #ifdef __x86_64__
1501 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1502 used for fast tracepoints. */
1503 if (is_64bit_tdesc ())
1504 return 5;
1505 #endif
1506
1507 if (agent_loaded_p ())
1508 {
1509 char errbuf[IPA_BUFSIZ];
1510
1511 errbuf[0] = '\0';
1512
1513 /* On x86, if trampolines are available, then 4-byte jump instructions
1514 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1515 with a 4-byte offset are used instead. */
1516 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1517 return 4;
1518 else
1519 {
1520 /* GDB has no channel to explain to the user why a shorter fast
1521 tracepoint is not possible, but at least make GDBserver
1522 mention that something has gone awry. */
1523 if (!warned_about_fast_tracepoints)
1524 {
1525 warning ("4-byte fast tracepoints not available; %s", errbuf);
1526 warned_about_fast_tracepoints = 1;
1527 }
1528 return 5;
1529 }
1530 }
1531 else
1532 {
1533 /* Indicate that the minimum length is currently unknown since the IPA
1534 has not loaded yet. */
1535 return 0;
1536 }
1537 }
1538
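/* Append LEN bytes of instruction from START to the bytecode
compilation area, advancing current_insn_ptr. */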
1539 static void
1540 add_insns (unsigned char *start, int len)
1541 {
1542 CORE_ADDR buildaddr = current_insn_ptr;
1543
1544 if (debug_threads)
1545 debug_printf ("Adding %d bytes of insn at %s\n",
1546 len, paddress (buildaddr));
1547
1548 append_insns (&buildaddr, len, start);
1549 current_insn_ptr = buildaddr;
1550 }
1551
1552 /* Our general strategy for emitting code is to avoid specifying raw
1553 bytes whenever possible, and instead copy a block of inline asm
1554 that is embedded in the function. This is a little messy, because
1555 we need to keep the compiler from discarding what looks like dead
1556 code, plus suppress various warnings. */
1557
1558 #define EMIT_ASM(NAME, INSNS) \
1559 do \
1560 { \
1561 extern unsigned char start_ ## NAME, end_ ## NAME; \
1562 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1563 __asm__ ("jmp end_" #NAME "\n" \
1564 "\t" "start_" #NAME ":" \
1565 "\t" INSNS "\n" \
1566 "\t" "end_" #NAME ":"); \
1567 } while (0)
1568
1569 #ifdef __x86_64__
1570
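/* Like EMIT_ASM, but wrap the inline assembly in .code32/.code64 so
that 32-bit instruction sequences can be assembled within a 64-bit
gdbserver. */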
1571 #define EMIT_ASM32(NAME,INSNS) \
1572 do \
1573 { \
1574 extern unsigned char start_ ## NAME, end_ ## NAME; \
1575 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1576 __asm__ (".code32\n" \
1577 "\t" "jmp end_" #NAME "\n" \
1578 "\t" "start_" #NAME ":\n" \
1579 "\t" INSNS "\n" \
1580 "\t" "end_" #NAME ":\n" \
1581 ".code64\n"); \
1582 } while (0)
1583
1584 #else
1585
1586 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1587
1588 #endif
1589
1590 #ifdef __x86_64__
1591
1592 static void
1593 amd64_emit_prologue (void)
1594 {
1595 EMIT_ASM (amd64_prologue,
1596 "pushq %rbp\n\t"
1597 "movq %rsp,%rbp\n\t"
1598 "sub $0x20,%rsp\n\t"
1599 "movq %rdi,-8(%rbp)\n\t"
1600 "movq %rsi,-16(%rbp)");
1601 }
1602
1603
1604 static void
1605 amd64_emit_epilogue (void)
1606 {
1607 EMIT_ASM (amd64_epilogue,
1608 "movq -16(%rbp),%rdi\n\t"
1609 "movq %rax,(%rdi)\n\t"
1610 "xor %rax,%rax\n\t"
1611 "leave\n\t"
1612 "ret");
1613 }
1614
1615 static void
1616 amd64_emit_add (void)
1617 {
1618 EMIT_ASM (amd64_add,
1619 "add (%rsp),%rax\n\t"
1620 "lea 0x8(%rsp),%rsp");
1621 }
1622
1623 static void
1624 amd64_emit_sub (void)
1625 {
1626 EMIT_ASM (amd64_sub,
1627 "sub %rax,(%rsp)\n\t"
1628 "pop %rax");
1629 }
1630
1631 static void
1632 amd64_emit_mul (void)
1633 {
1634 emit_error = 1;
1635 }
1636
1637 static void
1638 amd64_emit_lsh (void)
1639 {
1640 emit_error = 1;
1641 }
1642
1643 static void
1644 amd64_emit_rsh_signed (void)
1645 {
1646 emit_error = 1;
1647 }
1648
1649 static void
1650 amd64_emit_rsh_unsigned (void)
1651 {
1652 emit_error = 1;
1653 }
1654
1655 static void
1656 amd64_emit_ext (int arg)
1657 {
1658 switch (arg)
1659 {
1660 case 8:
1661 EMIT_ASM (amd64_ext_8,
1662 "cbtw\n\t"
1663 "cwtl\n\t"
1664 "cltq");
1665 break;
1666 case 16:
1667 EMIT_ASM (amd64_ext_16,
1668 "cwtl\n\t"
1669 "cltq");
1670 break;
1671 case 32:
1672 EMIT_ASM (amd64_ext_32,
1673 "cltq");
1674 break;
1675 default:
1676 emit_error = 1;
1677 }
1678 }
1679
1680 static void
1681 amd64_emit_log_not (void)
1682 {
1683 EMIT_ASM (amd64_log_not,
1684 "test %rax,%rax\n\t"
1685 "sete %cl\n\t"
1686 "movzbq %cl,%rax");
1687 }
1688
1689 static void
1690 amd64_emit_bit_and (void)
1691 {
1692 EMIT_ASM (amd64_and,
1693 "and (%rsp),%rax\n\t"
1694 "lea 0x8(%rsp),%rsp");
1695 }
1696
1697 static void
1698 amd64_emit_bit_or (void)
1699 {
1700 EMIT_ASM (amd64_or,
1701 "or (%rsp),%rax\n\t"
1702 "lea 0x8(%rsp),%rsp");
1703 }
1704
1705 static void
1706 amd64_emit_bit_xor (void)
1707 {
1708 EMIT_ASM (amd64_xor,
1709 "xor (%rsp),%rax\n\t"
1710 "lea 0x8(%rsp),%rsp");
1711 }
1712
1713 static void
1714 amd64_emit_bit_not (void)
1715 {
1716 EMIT_ASM (amd64_bit_not,
1717 "xorq $0xffffffffffffffff,%rax");
1718 }
1719
1720 static void
1721 amd64_emit_equal (void)
1722 {
1723 EMIT_ASM (amd64_equal,
1724 "cmp %rax,(%rsp)\n\t"
1725 "je .Lamd64_equal_true\n\t"
1726 "xor %rax,%rax\n\t"
1727 "jmp .Lamd64_equal_end\n\t"
1728 ".Lamd64_equal_true:\n\t"
1729 "mov $0x1,%rax\n\t"
1730 ".Lamd64_equal_end:\n\t"
1731 "lea 0x8(%rsp),%rsp");
1732 }
1733
1734 static void
1735 amd64_emit_less_signed (void)
1736 {
1737 EMIT_ASM (amd64_less_signed,
1738 "cmp %rax,(%rsp)\n\t"
1739 "jl .Lamd64_less_signed_true\n\t"
1740 "xor %rax,%rax\n\t"
1741 "jmp .Lamd64_less_signed_end\n\t"
1742 ".Lamd64_less_signed_true:\n\t"
1743 "mov $1,%rax\n\t"
1744 ".Lamd64_less_signed_end:\n\t"
1745 "lea 0x8(%rsp),%rsp");
1746 }
1747
1748 static void
1749 amd64_emit_less_unsigned (void)
1750 {
1751 EMIT_ASM (amd64_less_unsigned,
1752 "cmp %rax,(%rsp)\n\t"
1753 "jb .Lamd64_less_unsigned_true\n\t"
1754 "xor %rax,%rax\n\t"
1755 "jmp .Lamd64_less_unsigned_end\n\t"
1756 ".Lamd64_less_unsigned_true:\n\t"
1757 "mov $1,%rax\n\t"
1758 ".Lamd64_less_unsigned_end:\n\t"
1759 "lea 0x8(%rsp),%rsp");
1760 }
1761
1762 static void
1763 amd64_emit_ref (int size)
1764 {
1765 switch (size)
1766 {
1767 case 1:
1768 EMIT_ASM (amd64_ref1,
1769 "movb (%rax),%al");
1770 break;
1771 case 2:
1772 EMIT_ASM (amd64_ref2,
1773 "movw (%rax),%ax");
1774 break;
1775 case 4:
1776 EMIT_ASM (amd64_ref4,
1777 "movl (%rax),%eax");
1778 break;
1779 case 8:
1780 EMIT_ASM (amd64_ref8,
1781 "movq (%rax),%rax");
1782 break;
1783 }
1784 }
1785
1786 static void
1787 amd64_emit_if_goto (int *offset_p, int *size_p)
1788 {
1789 EMIT_ASM (amd64_if_goto,
1790 "mov %rax,%rcx\n\t"
1791 "pop %rax\n\t"
1792 "cmp $0,%rcx\n\t"
1793 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1794 if (offset_p)
1795 *offset_p = 10;
1796 if (size_p)
1797 *size_p = 4;
1798 }
1799
1800 static void
1801 amd64_emit_goto (int *offset_p, int *size_p)
1802 {
1803 EMIT_ASM (amd64_goto,
1804 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1805 if (offset_p)
1806 *offset_p = 1;
1807 if (size_p)
1808 *size_p = 4;
1809 }
1810
1811 static void
1812 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1813 {
1814 int diff = (to - (from + size));
1815 unsigned char buf[sizeof (int)];
1816
1817 if (size != 4)
1818 {
1819 emit_error = 1;
1820 return;
1821 }
1822
1823 memcpy (buf, &diff, sizeof (int));
1824 target_write_memory (from, buf, sizeof (int));
1825 }
1826
1827 static void
1828 amd64_emit_const (LONGEST num)
1829 {
1830 unsigned char buf[16];
1831 int i;
1832 CORE_ADDR buildaddr = current_insn_ptr;
1833
1834 i = 0;
1835 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1836 memcpy (&buf[i], &num, sizeof (num));
1837 i += 8;
1838 append_insns (&buildaddr, i, buf);
1839 current_insn_ptr = buildaddr;
1840 }
1841
1842 static void
1843 amd64_emit_call (CORE_ADDR fn)
1844 {
1845 unsigned char buf[16];
1846 int i;
1847 CORE_ADDR buildaddr;
1848 LONGEST offset64;
1849
1850 /* The destination function, being in the shared library, may be
1851 more than 31 bits away from the compiled code pad. */
1852
1853 buildaddr = current_insn_ptr;
1854
1855 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1856
1857 i = 0;
1858
1859 if (offset64 > INT_MAX || offset64 < INT_MIN)
1860 {
1861 /* Offset is too large for a direct call, so fall back to an
1862 indirect callq through a register. Use %r10: since it is
1863 call-clobbered, we don't have to push/pop it. */
1864 buf[i++] = 0x48; /* mov $fn,%r10 */
1865 buf[i++] = 0xba;
1866 memcpy (buf + i, &fn, 8);
1867 i += 8;
1868 buf[i++] = 0xff; /* callq *%r10 */
1869 buf[i++] = 0xd2;
1870 }
1871 else
1872 {
1873 int offset32 = offset64; /* we know we can't overflow here. */
1874
1875 buf[i++] = 0xe8; /* call <reladdr> */
1876 memcpy (buf + i, &offset32, 4);
1877 i += 4;
1878 }
1879
1880 append_insns (&buildaddr, i, buf);
1881 current_insn_ptr = buildaddr;
1882 }
1883
1884 static void
1885 amd64_emit_reg (int reg)
1886 {
1887 unsigned char buf[16];
1888 int i;
1889 CORE_ADDR buildaddr;
1890
1891 /* Assume raw_regs is still in %rdi. */
1892 buildaddr = current_insn_ptr;
1893 i = 0;
1894 buf[i++] = 0xbe; /* mov $<n>,%esi */
1895 memcpy (&buf[i], &reg, sizeof (reg));
1896 i += 4;
1897 append_insns (&buildaddr, i, buf);
1898 current_insn_ptr = buildaddr;
1899 amd64_emit_call (get_raw_reg_func_addr ());
1900 }
1901
1902 static void
1903 amd64_emit_pop (void)
1904 {
1905 EMIT_ASM (amd64_pop,
1906 "pop %rax");
1907 }
1908
1909 static void
1910 amd64_emit_stack_flush (void)
1911 {
1912 EMIT_ASM (amd64_stack_flush,
1913 "push %rax");
1914 }
1915
1916 static void
1917 amd64_emit_zero_ext (int arg)
1918 {
1919 switch (arg)
1920 {
1921 case 8:
1922 EMIT_ASM (amd64_zero_ext_8,
1923 "and $0xff,%rax");
1924 break;
1925 case 16:
1926 EMIT_ASM (amd64_zero_ext_16,
1927 "and $0xffff,%rax");
1928 break;
1929 case 32:
1930 EMIT_ASM (amd64_zero_ext_32,
1931 "mov $0xffffffff,%rcx\n\t"
1932 "and %rcx,%rax");
1933 break;
1934 default:
1935 emit_error = 1;
1936 }
1937 }
1938
1939 static void
1940 amd64_emit_swap (void)
1941 {
1942 EMIT_ASM (amd64_swap,
1943 "mov %rax,%rcx\n\t"
1944 "pop %rax\n\t"
1945 "push %rcx");
1946 }
1947
1948 static void
1949 amd64_emit_stack_adjust (int n)
1950 {
1951 unsigned char buf[16];
1952 int i;
1953 CORE_ADDR buildaddr = current_insn_ptr;
1954
1955 i = 0;
1956 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1957 buf[i++] = 0x8d;
1958 buf[i++] = 0x64;
1959 buf[i++] = 0x24;
1960 /* This only handles adjustments up to 16, but we don't expect any more. */
1961 buf[i++] = n * 8;
1962 append_insns (&buildaddr, i, buf);
1963 current_insn_ptr = buildaddr;
1964 }
1965
1966 /* FN's prototype is `LONGEST(*fn)(int)'. */
1967
1968 static void
1969 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1970 {
1971 unsigned char buf[16];
1972 int i;
1973 CORE_ADDR buildaddr;
1974
1975 buildaddr = current_insn_ptr;
1976 i = 0;
1977 buf[i++] = 0xbf; /* movl $<n>,%edi */
1978 memcpy (&buf[i], &arg1, sizeof (arg1));
1979 i += 4;
1980 append_insns (&buildaddr, i, buf);
1981 current_insn_ptr = buildaddr;
1982 amd64_emit_call (fn);
1983 }
1984
1985 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1986
1987 static void
1988 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1989 {
1990 unsigned char buf[16];
1991 int i;
1992 CORE_ADDR buildaddr;
1993
1994 buildaddr = current_insn_ptr;
1995 i = 0;
1996 buf[i++] = 0xbf; /* movl $<n>,%edi */
1997 memcpy (&buf[i], &arg1, sizeof (arg1));
1998 i += 4;
1999 append_insns (&buildaddr, i, buf);
2000 current_insn_ptr = buildaddr;
2001 EMIT_ASM (amd64_void_call_2_a,
2002 /* Save away a copy of the stack top. */
2003 "push %rax\n\t"
2004 /* Also pass top as the second argument. */
2005 "mov %rax,%rsi");
2006 amd64_emit_call (fn);
2007 EMIT_ASM (amd64_void_call_2_b,
2008 /* Restore the stack top, %rax may have been trashed. */
2009 "pop %rax");
2010 }
2011
2012 static void
2013 amd64_emit_eq_goto (int *offset_p, int *size_p)
2014 {
2015 EMIT_ASM (amd64_eq,
2016 "cmp %rax,(%rsp)\n\t"
2017 "jne .Lamd64_eq_fallthru\n\t"
2018 "lea 0x8(%rsp),%rsp\n\t"
2019 "pop %rax\n\t"
2020 /* jmp, but don't trust the assembler to choose the right jump */
2021 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2022 ".Lamd64_eq_fallthru:\n\t"
2023 "lea 0x8(%rsp),%rsp\n\t"
2024 "pop %rax");
2025
2026 if (offset_p)
2027 *offset_p = 13;
2028 if (size_p)
2029 *size_p = 4;
2030 }
2031
2032 static void
2033 amd64_emit_ne_goto (int *offset_p, int *size_p)
2034 {
2035 EMIT_ASM (amd64_ne,
2036 "cmp %rax,(%rsp)\n\t"
2037 "je .Lamd64_ne_fallthru\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2039 "pop %rax\n\t"
2040 /* jmp, but don't trust the assembler to choose the right jump */
2041 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2042 ".Lamd64_ne_fallthru:\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2044 "pop %rax");
2045
2046 if (offset_p)
2047 *offset_p = 13;
2048 if (size_p)
2049 *size_p = 4;
2050 }
2051
2052 static void
2053 amd64_emit_lt_goto (int *offset_p, int *size_p)
2054 {
2055 EMIT_ASM (amd64_lt,
2056 "cmp %rax,(%rsp)\n\t"
2057 "jnl .Lamd64_lt_fallthru\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2059 "pop %rax\n\t"
2060 /* jmp, but don't trust the assembler to choose the right jump */
2061 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2062 ".Lamd64_lt_fallthru:\n\t"
2063 "lea 0x8(%rsp),%rsp\n\t"
2064 "pop %rax");
2065
2066 if (offset_p)
2067 *offset_p = 13;
2068 if (size_p)
2069 *size_p = 4;
2070 }
2071
2072 static void
2073 amd64_emit_le_goto (int *offset_p, int *size_p)
2074 {
2075 EMIT_ASM (amd64_le,
2076 "cmp %rax,(%rsp)\n\t"
2077 "jnle .Lamd64_le_fallthru\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax\n\t"
2080 /* jmp, but don't trust the assembler to choose the right jump */
2081 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2082 ".Lamd64_le_fallthru:\n\t"
2083 "lea 0x8(%rsp),%rsp\n\t"
2084 "pop %rax");
2085
2086 if (offset_p)
2087 *offset_p = 13;
2088 if (size_p)
2089 *size_p = 4;
2090 }
2091
2092 static void
2093 amd64_emit_gt_goto (int *offset_p, int *size_p)
2094 {
2095 EMIT_ASM (amd64_gt,
2096 "cmp %rax,(%rsp)\n\t"
2097 "jng .Lamd64_gt_fallthru\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax\n\t"
2100 /* jmp, but don't trust the assembler to choose the right jump */
2101 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2102 ".Lamd64_gt_fallthru:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax");
2105
2106 if (offset_p)
2107 *offset_p = 13;
2108 if (size_p)
2109 *size_p = 4;
2110 }
2111
2112 static void
2113 amd64_emit_ge_goto (int *offset_p, int *size_p)
2114 {
2115 EMIT_ASM (amd64_ge,
2116 "cmp %rax,(%rsp)\n\t"
2117 "jnge .Lamd64_ge_fallthru\n\t"
2118 ".Lamd64_ge_jump:\n\t"
2119 "lea 0x8(%rsp),%rsp\n\t"
2120 "pop %rax\n\t"
2121 /* jmp, but don't trust the assembler to choose the right jump */
2122 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2123 ".Lamd64_ge_fallthru:\n\t"
2124 "lea 0x8(%rsp),%rsp\n\t"
2125 "pop %rax");
2126
2127 if (offset_p)
2128 *offset_p = 13;
2129 if (size_p)
2130 *size_p = 4;
2131 }
2132
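/* Table of code-emitting operations for amd64, used when agent expressions
   (e.g. fast tracepoint conditions) are compiled to native code.
   x86_emit_ops below returns this table for 64-bit inferiors and
   i386_emit_ops otherwise.  */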
2133 struct emit_ops amd64_emit_ops =
2134 {
2135 amd64_emit_prologue,
2136 amd64_emit_epilogue,
2137 amd64_emit_add,
2138 amd64_emit_sub,
2139 amd64_emit_mul,
2140 amd64_emit_lsh,
2141 amd64_emit_rsh_signed,
2142 amd64_emit_rsh_unsigned,
2143 amd64_emit_ext,
2144 amd64_emit_log_not,
2145 amd64_emit_bit_and,
2146 amd64_emit_bit_or,
2147 amd64_emit_bit_xor,
2148 amd64_emit_bit_not,
2149 amd64_emit_equal,
2150 amd64_emit_less_signed,
2151 amd64_emit_less_unsigned,
2152 amd64_emit_ref,
2153 amd64_emit_if_goto,
2154 amd64_emit_goto,
2155 amd64_write_goto_address,
2156 amd64_emit_const,
2157 amd64_emit_call,
2158 amd64_emit_reg,
2159 amd64_emit_pop,
2160 amd64_emit_stack_flush,
2161 amd64_emit_zero_ext,
2162 amd64_emit_swap,
2163 amd64_emit_stack_adjust,
2164 amd64_emit_int_call_1,
2165 amd64_emit_void_call_2,
2166 amd64_emit_eq_goto,
2167 amd64_emit_ne_goto,
2168 amd64_emit_lt_goto,
2169 amd64_emit_le_goto,
2170 amd64_emit_gt_goto,
2171 amd64_emit_ge_goto
2172 };
2173
2174 #endif /* __x86_64__ */
2175
2176 static void
2177 i386_emit_prologue (void)
2178 {
2179 EMIT_ASM32 (i386_prologue,
2180 "push %ebp\n\t"
2181 "mov %esp,%ebp\n\t"
2182 "push %ebx");
2183 /* At this point, the raw regs base address is at 8(%ebp), and the
2184 value pointer is at 12(%ebp). */
2185 }
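/* Throughout the 32-bit emitters the 64-bit top-of-stack value is kept
   split across %eax (low word) and %ebx (high word); the epilogue below
   stores both halves through the value pointer at 12(%ebp), and operations
   such as add/adc work on the pair.  */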
2186
2187 static void
2188 i386_emit_epilogue (void)
2189 {
2190 EMIT_ASM32 (i386_epilogue,
2191 "mov 12(%ebp),%ecx\n\t"
2192 "mov %eax,(%ecx)\n\t"
2193 "mov %ebx,0x4(%ecx)\n\t"
2194 "xor %eax,%eax\n\t"
2195 "pop %ebx\n\t"
2196 "pop %ebp\n\t"
2197 "ret");
2198 }
2199
2200 static void
2201 i386_emit_add (void)
2202 {
2203 EMIT_ASM32 (i386_add,
2204 "add (%esp),%eax\n\t"
2205 "adc 0x4(%esp),%ebx\n\t"
2206 "lea 0x8(%esp),%esp");
2207 }
2208
2209 static void
2210 i386_emit_sub (void)
2211 {
2212 EMIT_ASM32 (i386_sub,
2213 "subl %eax,(%esp)\n\t"
2214 "sbbl %ebx,4(%esp)\n\t"
2215 "pop %eax\n\t"
2216 "pop %ebx\n\t");
2217 }
2218
2219 static void
2220 i386_emit_mul (void)
2221 {
2222 emit_error = 1;
2223 }
2224
2225 static void
2226 i386_emit_lsh (void)
2227 {
2228 emit_error = 1;
2229 }
2230
2231 static void
2232 i386_emit_rsh_signed (void)
2233 {
2234 emit_error = 1;
2235 }
2236
2237 static void
2238 i386_emit_rsh_unsigned (void)
2239 {
2240 emit_error = 1;
2241 }
2242
2243 static void
2244 i386_emit_ext (int arg)
2245 {
2246 switch (arg)
2247 {
2248 case 8:
2249 EMIT_ASM32 (i386_ext_8,
2250 "cbtw\n\t"
2251 "cwtl\n\t"
2252 "movl %eax,%ebx\n\t"
2253 "sarl $31,%ebx");
2254 break;
2255 case 16:
2256 EMIT_ASM32 (i386_ext_16,
2257 "cwtl\n\t"
2258 "movl %eax,%ebx\n\t"
2259 "sarl $31,%ebx");
2260 break;
2261 case 32:
2262 EMIT_ASM32 (i386_ext_32,
2263 "movl %eax,%ebx\n\t"
2264 "sarl $31,%ebx");
2265 break;
2266 default:
2267 emit_error = 1;
2268 }
2269 }
2270
2271 static void
2272 i386_emit_log_not (void)
2273 {
2274 EMIT_ASM32 (i386_log_not,
2275 "or %ebx,%eax\n\t"
2276 "test %eax,%eax\n\t"
2277 "sete %cl\n\t"
2278 "xor %ebx,%ebx\n\t"
2279 "movzbl %cl,%eax");
2280 }
2281
2282 static void
2283 i386_emit_bit_and (void)
2284 {
2285 EMIT_ASM32 (i386_and,
2286 "and (%esp),%eax\n\t"
2287 "and 0x4(%esp),%ebx\n\t"
2288 "lea 0x8(%esp),%esp");
2289 }
2290
2291 static void
2292 i386_emit_bit_or (void)
2293 {
2294 EMIT_ASM32 (i386_or,
2295 "or (%esp),%eax\n\t"
2296 "or 0x4(%esp),%ebx\n\t"
2297 "lea 0x8(%esp),%esp");
2298 }
2299
2300 static void
2301 i386_emit_bit_xor (void)
2302 {
2303 EMIT_ASM32 (i386_xor,
2304 "xor (%esp),%eax\n\t"
2305 "xor 0x4(%esp),%ebx\n\t"
2306 "lea 0x8(%esp),%esp");
2307 }
2308
2309 static void
2310 i386_emit_bit_not (void)
2311 {
2312 EMIT_ASM32 (i386_bit_not,
2313 "xor $0xffffffff,%eax\n\t"
2314 "xor $0xffffffff,%ebx\n\t");
2315 }
2316
2317 static void
2318 i386_emit_equal (void)
2319 {
2320 EMIT_ASM32 (i386_equal,
2321 "cmpl %ebx,4(%esp)\n\t"
2322 "jne .Li386_equal_false\n\t"
2323 "cmpl %eax,(%esp)\n\t"
2324 "je .Li386_equal_true\n\t"
2325 ".Li386_equal_false:\n\t"
2326 "xor %eax,%eax\n\t"
2327 "jmp .Li386_equal_end\n\t"
2328 ".Li386_equal_true:\n\t"
2329 "mov $1,%eax\n\t"
2330 ".Li386_equal_end:\n\t"
2331 "xor %ebx,%ebx\n\t"
2332 "lea 0x8(%esp),%esp");
2333 }
2334
2335 static void
2336 i386_emit_less_signed (void)
2337 {
2338 EMIT_ASM32 (i386_less_signed,
2339 "cmpl %ebx,4(%esp)\n\t"
2340 "jl .Li386_less_signed_true\n\t"
2341 "jne .Li386_less_signed_false\n\t"
2342 "cmpl %eax,(%esp)\n\t"
2343 "jl .Li386_less_signed_true\n\t"
2344 ".Li386_less_signed_false:\n\t"
2345 "xor %eax,%eax\n\t"
2346 "jmp .Li386_less_signed_end\n\t"
2347 ".Li386_less_signed_true:\n\t"
2348 "mov $1,%eax\n\t"
2349 ".Li386_less_signed_end:\n\t"
2350 "xor %ebx,%ebx\n\t"
2351 "lea 0x8(%esp),%esp");
2352 }
2353
2354 static void
2355 i386_emit_less_unsigned (void)
2356 {
2357 EMIT_ASM32 (i386_less_unsigned,
2358 "cmpl %ebx,4(%esp)\n\t"
2359 "jb .Li386_less_unsigned_true\n\t"
2360 "jne .Li386_less_unsigned_false\n\t"
2361 "cmpl %eax,(%esp)\n\t"
2362 "jb .Li386_less_unsigned_true\n\t"
2363 ".Li386_less_unsigned_false:\n\t"
2364 "xor %eax,%eax\n\t"
2365 "jmp .Li386_less_unsigned_end\n\t"
2366 ".Li386_less_unsigned_true:\n\t"
2367 "mov $1,%eax\n\t"
2368 ".Li386_less_unsigned_end:\n\t"
2369 "xor %ebx,%ebx\n\t"
2370 "lea 0x8(%esp),%esp");
2371 }
2372
2373 static void
2374 i386_emit_ref (int size)
2375 {
2376 switch (size)
2377 {
2378 case 1:
2379 EMIT_ASM32 (i386_ref1,
2380 "movb (%eax),%al");
2381 break;
2382 case 2:
2383 EMIT_ASM32 (i386_ref2,
2384 "movw (%eax),%ax");
2385 break;
2386 case 4:
2387 EMIT_ASM32 (i386_ref4,
2388 "movl (%eax),%eax");
2389 break;
2390 case 8:
2391 EMIT_ASM32 (i386_ref8,
2392 "movl 4(%eax),%ebx\n\t"
2393 "movl (%eax),%eax");
2394 break;
2395 }
2396 }
2397
2398 static void
2399 i386_emit_if_goto (int *offset_p, int *size_p)
2400 {
2401 EMIT_ASM32 (i386_if_goto,
2402 "mov %eax,%ecx\n\t"
2403 "or %ebx,%ecx\n\t"
2404 "pop %eax\n\t"
2405 "pop %ebx\n\t"
2406 "cmpl $0,%ecx\n\t"
2407 /* Don't trust the assembler to choose the right jump */
2408 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2409
2410 if (offset_p)
2411 *offset_p = 11; /* be sure that this matches the sequence above */
2412 if (size_p)
2413 *size_p = 4;
2414 }
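/* Byte counts for the sequence above: mov (2), or (2), the two pops
   (1 each) and cmpl $0,%ecx (3) come to 9 bytes, and the jne opcode
   0x0f 0x85 is two more, so the rel32 displacement begins at offset 11,
   matching *OFFSET_P.  */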
2415
2416 static void
2417 i386_emit_goto (int *offset_p, int *size_p)
2418 {
2419 EMIT_ASM32 (i386_goto,
2420 /* Don't trust the assembler to choose the right jump */
2421 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2422 if (offset_p)
2423 *offset_p = 1;
2424 if (size_p)
2425 *size_p = 4;
2426 }
2427
2428 static void
2429 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2430 {
2431 int diff = (to - (from + size));
2432 unsigned char buf[sizeof (int)];
2433
2434 /* We're only doing 4-byte sizes at the moment. */
2435 if (size != 4)
2436 {
2437 emit_error = 1;
2438 return;
2439 }
2440
2441 memcpy (buf, &diff, sizeof (int));
2442 target_write_memory (from, buf, sizeof (int));
2443 }
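/* FROM is the address of the displacement field itself; an x86 relative
   jump is taken from the end of that field, hence TO - (FROM + SIZE).
   For instance, patching a displacement at 0x1001 so the jump lands at
   0x1020 stores 0x1020 - (0x1001 + 4) = 0x1b.  */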
2444
2445 static void
2446 i386_emit_const (LONGEST num)
2447 {
2448 unsigned char buf[16];
2449 int i, hi, lo;
2450 CORE_ADDR buildaddr = current_insn_ptr;
2451
2452 i = 0;
2453 buf[i++] = 0xb8; /* mov $<n>,%eax */
2454 lo = num & 0xffffffff;
2455 memcpy (&buf[i], &lo, sizeof (lo));
2456 i += 4;
2457 hi = ((num >> 32) & 0xffffffff);
2458 if (hi)
2459 {
2460 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2461 memcpy (&buf[i], &hi, sizeof (hi));
2462 i += 4;
2463 }
2464 else
2465 {
2466 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2467 }
2468 append_insns (&buildaddr, i, buf);
2469 current_insn_ptr = buildaddr;
2470 }
2471
2472 static void
2473 i386_emit_call (CORE_ADDR fn)
2474 {
2475 unsigned char buf[16];
2476 int i, offset;
2477 CORE_ADDR buildaddr;
2478
2479 buildaddr = current_insn_ptr;
2480 i = 0;
2481 buf[i++] = 0xe8; /* call <reladdr> */
2482 offset = ((int) fn) - (buildaddr + 5);
2483 memcpy (buf + 1, &offset, 4);
2484 append_insns (&buildaddr, 5, buf);
2485 current_insn_ptr = buildaddr;
2486 }
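/* The 0xe8 call instruction is five bytes long and its rel32 operand is
   counted from the end of the instruction, hence FN - (BUILDADDR + 5)
   above.  */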
2487
2488 static void
2489 i386_emit_reg (int reg)
2490 {
2491 unsigned char buf[16];
2492 int i;
2493 CORE_ADDR buildaddr;
2494
2495 EMIT_ASM32 (i386_reg_a,
2496 "sub $0x8,%esp");
2497 buildaddr = current_insn_ptr;
2498 i = 0;
2499 buf[i++] = 0xb8; /* mov $<n>,%eax */
2500 memcpy (&buf[i], &reg, sizeof (reg));
2501 i += 4;
2502 append_insns (&buildaddr, i, buf);
2503 current_insn_ptr = buildaddr;
2504 EMIT_ASM32 (i386_reg_b,
2505 "mov %eax,4(%esp)\n\t"
2506 "mov 8(%ebp),%eax\n\t"
2507 "mov %eax,(%esp)");
2508 i386_emit_call (get_raw_reg_func_addr ());
2509 EMIT_ASM32 (i386_reg_c,
2510 "xor %ebx,%ebx\n\t"
2511 "lea 0x8(%esp),%esp");
2512 }
2513
2514 static void
2515 i386_emit_pop (void)
2516 {
2517 EMIT_ASM32 (i386_pop,
2518 "pop %eax\n\t"
2519 "pop %ebx");
2520 }
2521
2522 static void
2523 i386_emit_stack_flush (void)
2524 {
2525 EMIT_ASM32 (i386_stack_flush,
2526 "push %ebx\n\t"
2527 "push %eax");
2528 }
2529
2530 static void
2531 i386_emit_zero_ext (int arg)
2532 {
2533 switch (arg)
2534 {
2535 case 8:
2536 EMIT_ASM32 (i386_zero_ext_8,
2537 "and $0xff,%eax\n\t"
2538 "xor %ebx,%ebx");
2539 break;
2540 case 16:
2541 EMIT_ASM32 (i386_zero_ext_16,
2542 "and $0xffff,%eax\n\t"
2543 "xor %ebx,%ebx");
2544 break;
2545 case 32:
2546 EMIT_ASM32 (i386_zero_ext_32,
2547 "xor %ebx,%ebx");
2548 break;
2549 default:
2550 emit_error = 1;
2551 }
2552 }
2553
2554 static void
2555 i386_emit_swap (void)
2556 {
2557 EMIT_ASM32 (i386_swap,
2558 "mov %eax,%ecx\n\t"
2559 "mov %ebx,%edx\n\t"
2560 "pop %eax\n\t"
2561 "pop %ebx\n\t"
2562 "push %edx\n\t"
2563 "push %ecx");
2564 }
2565
2566 static void
2567 i386_emit_stack_adjust (int n)
2568 {
2569 unsigned char buf[16];
2570 int i;
2571 CORE_ADDR buildaddr = current_insn_ptr;
2572
2573 i = 0;
2574 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2575 buf[i++] = 0x64;
2576 buf[i++] = 0x24;
2577 buf[i++] = n * 8;
2578 append_insns (&buildaddr, i, buf);
2579 current_insn_ptr = buildaddr;
2580 }
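/* Each entry on the emitters' stack occupies two 32-bit words
   (i386_emit_stack_flush pushes %ebx then %eax), hence N * 8; as on amd64,
   the single-byte displacement keeps this to small adjustments.  */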
2581
2582 /* FN's prototype is `LONGEST(*fn)(int)'. */
2583
2584 static void
2585 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2586 {
2587 unsigned char buf[16];
2588 int i;
2589 CORE_ADDR buildaddr;
2590
2591 EMIT_ASM32 (i386_int_call_1_a,
2592 /* Reserve a bit of stack space. */
2593 "sub $0x8,%esp");
2594 /* Put the one argument on the stack. */
2595 buildaddr = current_insn_ptr;
2596 i = 0;
2597 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2598 buf[i++] = 0x04;
2599 buf[i++] = 0x24;
2600 memcpy (&buf[i], &arg1, sizeof (arg1));
2601 i += 4;
2602 append_insns (&buildaddr, i, buf);
2603 current_insn_ptr = buildaddr;
2604 i386_emit_call (fn);
2605 EMIT_ASM32 (i386_int_call_1_c,
2606 "mov %edx,%ebx\n\t"
2607 "lea 0x8(%esp),%esp");
2608 }
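/* The 32-bit calling convention returns the 64-bit result in %edx:%eax;
   the trailing "mov %edx,%ebx" folds the high half into the %ebx:%eax
   top-of-stack pair before the argument slot is released.  */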
2609
2610 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2611
2612 static void
2613 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2614 {
2615 unsigned char buf[16];
2616 int i;
2617 CORE_ADDR buildaddr;
2618
2619 EMIT_ASM32 (i386_void_call_2_a,
2620 /* Preserve %eax only; we don't have to worry about %ebx. */
2621 "push %eax\n\t"
2622 /* Reserve a bit of stack space for arguments. */
2623 "sub $0x10,%esp\n\t"
2624 /* Copy "top" to the second argument position. (Note that
2625 we can't assume the function won't scribble on its
2626 arguments, so don't try to restore from this.) */
2627 "mov %eax,4(%esp)\n\t"
2628 "mov %ebx,8(%esp)");
2629 /* Put the first argument on the stack. */
2630 buildaddr = current_insn_ptr;
2631 i = 0;
2632 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2633 buf[i++] = 0x04;
2634 buf[i++] = 0x24;
2635 memcpy (&buf[i], &arg1, sizeof (arg1));
2636 i += 4;
2637 append_insns (&buildaddr, i, buf);
2638 current_insn_ptr = buildaddr;
2639 i386_emit_call (fn);
2640 EMIT_ASM32 (i386_void_call_2_b,
2641 "lea 0x10(%esp),%esp\n\t"
2642 /* Restore original stack top. */
2643 "pop %eax");
2644 }
2645
2646
2647 static void
2648 i386_emit_eq_goto (int *offset_p, int *size_p)
2649 {
2650 EMIT_ASM32 (eq,
2651 /* Check the low half first; it is more likely to be the decider */
2652 "cmpl %eax,(%esp)\n\t"
2653 "jne .Leq_fallthru\n\t"
2654 "cmpl %ebx,4(%esp)\n\t"
2655 "jne .Leq_fallthru\n\t"
2656 "lea 0x8(%esp),%esp\n\t"
2657 "pop %eax\n\t"
2658 "pop %ebx\n\t"
2659 /* jmp, but don't trust the assembler to choose the right jump */
2660 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2661 ".Leq_fallthru:\n\t"
2662 "lea 0x8(%esp),%esp\n\t"
2663 "pop %eax\n\t"
2664 "pop %ebx");
2665
2666 if (offset_p)
2667 *offset_p = 18;
2668 if (size_p)
2669 *size_p = 4;
2670 }
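/* Byte counts for the sequence above: the two cmpl instructions (3 and 4
   bytes), two rel8 conditional jumps (2 each), the lea (4) and the two
   pops (1 each) come to 17, plus one byte for the 0xe9 opcode, so the
   patched displacement starts at offset 18.  i386_emit_ne_goto shares this
   layout; the lt/le/gt/ge variants below start with the 4-byte cmpl and
   add one more rel8 jump, giving offset 20.  */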
2671
2672 static void
2673 i386_emit_ne_goto (int *offset_p, int *size_p)
2674 {
2675 EMIT_ASM32 (ne,
2676 /* Check the low half first; it is more likely to be the decider */
2677 "cmpl %eax,(%esp)\n\t"
2678 "jne .Lne_jump\n\t"
2679 "cmpl %ebx,4(%esp)\n\t"
2680 "je .Lne_fallthru\n\t"
2681 ".Lne_jump:\n\t"
2682 "lea 0x8(%esp),%esp\n\t"
2683 "pop %eax\n\t"
2684 "pop %ebx\n\t"
2685 /* jmp, but don't trust the assembler to choose the right jump */
2686 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2687 ".Lne_fallthru:\n\t"
2688 "lea 0x8(%esp),%esp\n\t"
2689 "pop %eax\n\t"
2690 "pop %ebx");
2691
2692 if (offset_p)
2693 *offset_p = 18;
2694 if (size_p)
2695 *size_p = 4;
2696 }
2697
2698 static void
2699 i386_emit_lt_goto (int *offset_p, int *size_p)
2700 {
2701 EMIT_ASM32 (lt,
2702 "cmpl %ebx,4(%esp)\n\t"
2703 "jl .Llt_jump\n\t"
2704 "jne .Llt_fallthru\n\t"
2705 "cmpl %eax,(%esp)\n\t"
2706 "jnl .Llt_fallthru\n\t"
2707 ".Llt_jump:\n\t"
2708 "lea 0x8(%esp),%esp\n\t"
2709 "pop %eax\n\t"
2710 "pop %ebx\n\t"
2711 /* jmp, but don't trust the assembler to choose the right jump */
2712 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2713 ".Llt_fallthru:\n\t"
2714 "lea 0x8(%esp),%esp\n\t"
2715 "pop %eax\n\t"
2716 "pop %ebx");
2717
2718 if (offset_p)
2719 *offset_p = 20;
2720 if (size_p)
2721 *size_p = 4;
2722 }
2723
2724 static void
2725 i386_emit_le_goto (int *offset_p, int *size_p)
2726 {
2727 EMIT_ASM32 (le,
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jle .Lle_jump\n\t"
2730 "jne .Lle_fallthru\n\t"
2731 "cmpl %eax,(%esp)\n\t"
2732 "jnle .Lle_fallthru\n\t"
2733 ".Lle_jump:\n\t"
2734 "lea 0x8(%esp),%esp\n\t"
2735 "pop %eax\n\t"
2736 "pop %ebx\n\t"
2737 /* jmp, but don't trust the assembler to choose the right jump */
2738 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2739 ".Lle_fallthru:\n\t"
2740 "lea 0x8(%esp),%esp\n\t"
2741 "pop %eax\n\t"
2742 "pop %ebx");
2743
2744 if (offset_p)
2745 *offset_p = 20;
2746 if (size_p)
2747 *size_p = 4;
2748 }
2749
2750 static void
2751 i386_emit_gt_goto (int *offset_p, int *size_p)
2752 {
2753 EMIT_ASM32 (gt,
2754 "cmpl %ebx,4(%esp)\n\t"
2755 "jg .Lgt_jump\n\t"
2756 "jne .Lgt_fallthru\n\t"
2757 "cmpl %eax,(%esp)\n\t"
2758 "jng .Lgt_fallthru\n\t"
2759 ".Lgt_jump:\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2761 "pop %eax\n\t"
2762 "pop %ebx\n\t"
2763 /* jmp, but don't trust the assembler to choose the right jump */
2764 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2765 ".Lgt_fallthru:\n\t"
2766 "lea 0x8(%esp),%esp\n\t"
2767 "pop %eax\n\t"
2768 "pop %ebx");
2769
2770 if (offset_p)
2771 *offset_p = 20;
2772 if (size_p)
2773 *size_p = 4;
2774 }
2775
2776 static void
2777 i386_emit_ge_goto (int *offset_p, int *size_p)
2778 {
2779 EMIT_ASM32 (ge,
2780 "cmpl %ebx,4(%esp)\n\t"
2781 "jge .Lge_jump\n\t"
2782 "jne .Lge_fallthru\n\t"
2783 "cmpl %eax,(%esp)\n\t"
2784 "jnge .Lge_fallthru\n\t"
2785 ".Lge_jump:\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2787 "pop %eax\n\t"
2788 "pop %ebx\n\t"
2789 /* jmp, but don't trust the assembler to choose the right jump */
2790 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2791 ".Lge_fallthru:\n\t"
2792 "lea 0x8(%esp),%esp\n\t"
2793 "pop %eax\n\t"
2794 "pop %ebx");
2795
2796 if (offset_p)
2797 *offset_p = 20;
2798 if (size_p)
2799 *size_p = 4;
2800 }
2801
2802 struct emit_ops i386_emit_ops =
2803 {
2804 i386_emit_prologue,
2805 i386_emit_epilogue,
2806 i386_emit_add,
2807 i386_emit_sub,
2808 i386_emit_mul,
2809 i386_emit_lsh,
2810 i386_emit_rsh_signed,
2811 i386_emit_rsh_unsigned,
2812 i386_emit_ext,
2813 i386_emit_log_not,
2814 i386_emit_bit_and,
2815 i386_emit_bit_or,
2816 i386_emit_bit_xor,
2817 i386_emit_bit_not,
2818 i386_emit_equal,
2819 i386_emit_less_signed,
2820 i386_emit_less_unsigned,
2821 i386_emit_ref,
2822 i386_emit_if_goto,
2823 i386_emit_goto,
2824 i386_write_goto_address,
2825 i386_emit_const,
2826 i386_emit_call,
2827 i386_emit_reg,
2828 i386_emit_pop,
2829 i386_emit_stack_flush,
2830 i386_emit_zero_ext,
2831 i386_emit_swap,
2832 i386_emit_stack_adjust,
2833 i386_emit_int_call_1,
2834 i386_emit_void_call_2,
2835 i386_emit_eq_goto,
2836 i386_emit_ne_goto,
2837 i386_emit_lt_goto,
2838 i386_emit_le_goto,
2839 i386_emit_gt_goto,
2840 i386_emit_ge_goto
2841 };
2842
2843
2844 static struct emit_ops *
2845 x86_emit_ops (void)
2846 {
2847 #ifdef __x86_64__
2848 if (is_64bit_tdesc ())
2849 return &amd64_emit_ops;
2850 else
2851 #endif
2852 return &i386_emit_ops;
2853 }
2854
2855 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2856
2857 static const gdb_byte *
2858 x86_sw_breakpoint_from_kind (int kind, int *size)
2859 {
2860 *size = x86_breakpoint_len;
2861 return x86_breakpoint;
2862 }
2863
2864 static int
2865 x86_supports_range_stepping (void)
2866 {
2867 return 1;
2868 }
2869
2870 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2871 */
2872
2873 static int
2874 x86_supports_hardware_single_step (void)
2875 {
2876 return 1;
2877 }
2878
2879 static int
2880 x86_get_ipa_tdesc_idx (void)
2881 {
2882 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2883 const struct target_desc *tdesc = regcache->tdesc;
2884
2885 #ifdef __x86_64__
2886 return amd64_get_ipa_tdesc_idx (tdesc);
2887 #endif
2888
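  /* Only reached in 32-bit builds; the amd64 branch above returns
     unconditionally.  */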
2889 if (tdesc == tdesc_i386_linux_no_xml)
2890 return X86_TDESC_SSE;
2891
2892 return i386_get_ipa_tdesc_idx (tdesc);
2893 }
2894
2895 /* This is initialized assuming an amd64 target.
2896 'low_arch_setup' will correct it for i386 or amd64 targets. */
2897
2898 struct linux_target_ops the_low_target =
2899 {
2900 NULL, /* breakpoint_kind_from_pc */
2901 x86_sw_breakpoint_from_kind,
2902 NULL,
2903 1,
2904 x86_breakpoint_at,
2905 x86_supports_z_point_type,
2906 x86_insert_point,
2907 x86_remove_point,
2908 x86_stopped_by_watchpoint,
2909 x86_stopped_data_address,
2910 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2911 native i386 case (no registers smaller than an xfer unit), and are not
2912 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2913 NULL,
2914 NULL,
2915 /* need to fix up i386 siginfo if host is amd64 */
2916 x86_siginfo_fixup,
2917 x86_linux_new_process,
2918 x86_linux_delete_process,
2919 x86_linux_new_thread,
2920 x86_linux_delete_thread,
2921 x86_linux_new_fork,
2922 x86_linux_prepare_to_resume,
2923 x86_linux_process_qsupported,
2924 x86_supports_tracepoints,
2925 x86_get_thread_area,
2926 x86_install_fast_tracepoint_jump_pad,
2927 x86_emit_ops,
2928 x86_get_min_fast_tracepoint_insn_len,
2929 x86_supports_range_stepping,
2930 NULL, /* breakpoint_kind_from_current_state */
2931 x86_supports_hardware_single_step,
2932 x86_get_syscall_trapinfo,
2933 x86_get_ipa_tdesc_idx,
2934 };
2935
2936 /* The linux target ops object. */
2937
2938 linux_process_target *the_linux_target = &the_x86_target;
2939
2940 void
2941 initialize_low_arch (void)
2942 {
2943 /* Initialize the Linux target descriptions. */
2944 #ifdef __x86_64__
2945 tdesc_amd64_linux_no_xml = allocate_target_description ();
2946 copy_target_description (tdesc_amd64_linux_no_xml,
2947 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2948 false));
2949 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2950 #endif
2951
2952 tdesc_i386_linux_no_xml = allocate_target_description ();
2953 copy_target_description (tdesc_i386_linux_no_xml,
2954 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2955 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2956
2957 initialize_regsets_info (&x86_regsets_info);
2958 }