gdbserver/linux-low: turn 'breakpoint_at' into a method
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
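/* A 5-byte "jmp rel32" (opcode 0xe9 followed by a 32-bit displacement) and
   a 4-byte "jmp rel16" (0x66 operand-size prefix, 0xe9, 16-bit displacement).
   The zeroed displacement bytes are patched in when the jump is emitted.  */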
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103   /* Update the target description of all processes; a new GDB has
104      connected, and it may or may not support xml target descriptions.  */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 protected:
112
113 void low_arch_setup () override;
114
115 bool low_cannot_fetch_register (int regno) override;
116
117 bool low_cannot_store_register (int regno) override;
118
119 bool low_supports_breakpoints () override;
120
121 CORE_ADDR low_get_pc (regcache *regcache) override;
122
123 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
124
125 int low_decr_pc_after_break () override;
126
127 bool low_breakpoint_at (CORE_ADDR pc) override;
128 };
129
130 /* The singleton target ops object. */
131
132 static x86_target the_x86_target;
133
134 /* Per-process arch-specific data we want to keep. */
135
136 struct arch_process_info
137 {
138 struct x86_debug_reg_state debug_reg_state;
139 };
140
141 #ifdef __x86_64__
142
143 /* Mapping between the general-purpose registers in `struct user'
144 format and GDB's register array layout.
145 Note that the transfer layout uses 64-bit regs. */
146 static /*const*/ int i386_regmap[] =
147 {
148 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
149 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
150 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
151 DS * 8, ES * 8, FS * 8, GS * 8
152 };
153
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
155
156 /* So that code below doesn't have to care whether this is i386 or amd64.  */
157 #define ORIG_EAX ORIG_RAX
158 #define REGSIZE 8
159
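/* Entries of -1 mark registers that are not transferred via this regmap
   (they are supplied by other regsets, e.g. the xstate regset) and are
   skipped by x86_fill_gregset/x86_store_gregset below.  */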
160 static const int x86_64_regmap[] =
161 {
162 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
163 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
164 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
165 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
166 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
167 DS * 8, ES * 8, FS * 8, GS * 8,
168 -1, -1, -1, -1, -1, -1, -1, -1,
169 -1, -1, -1, -1, -1, -1, -1, -1,
170 -1, -1, -1, -1, -1, -1, -1, -1,
171 -1,
172 -1, -1, -1, -1, -1, -1, -1, -1,
173 ORIG_RAX * 8,
174 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
175 21 * 8, 22 * 8,
176 #else
177 -1, -1,
178 #endif
179 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
180 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
181 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
182 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
186 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
187 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1 /* pkru */
191 };
192
193 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
194 #define X86_64_USER_REGS (GS + 1)
195
196 #else /* ! __x86_64__ */
197
198 /* Mapping between the general-purpose registers in `struct user'
199 format and GDB's register array layout. */
200 static /*const*/ int i386_regmap[] =
201 {
202 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
203 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
204 EIP * 4, EFL * 4, CS * 4, SS * 4,
205 DS * 4, ES * 4, FS * 4, GS * 4
206 };
207
208 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
209
210 #define REGSIZE 4
211
212 #endif
213
214 #ifdef __x86_64__
215
216 /* Returns true if the current inferior belongs to an x86-64 process,
217 per the tdesc. */
218
219 static int
220 is_64bit_tdesc (void)
221 {
222 struct regcache *regcache = get_thread_regcache (current_thread, 0);
223
224 return register_size (regcache->tdesc, 0) == 8;
225 }
226
227 #endif
228
229 \f
230 /* Called by libthread_db. */
231
232 ps_err_e
233 ps_get_thread_area (struct ps_prochandle *ph,
234 lwpid_t lwpid, int idx, void **base)
235 {
236 #ifdef __x86_64__
237 int use_64bit = is_64bit_tdesc ();
238
239 if (use_64bit)
240 {
241 switch (idx)
242 {
243 case FS:
244 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
245 return PS_OK;
246 break;
247 case GS:
248 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
249 return PS_OK;
250 break;
251 default:
252 return PS_BADADDR;
253 }
254 return PS_ERR;
255 }
256 #endif
257
258 {
259 unsigned int desc[4];
260
261 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
262 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
263 return PS_ERR;
264
265 /* Ensure we properly extend the value to 64-bits for x86_64. */
266 *base = (void *) (uintptr_t) desc[1];
267 return PS_OK;
268 }
269 }
270
271 /* Get the thread area address. This is used to recognize which
272 thread is which when tracing with the in-process agent library. We
273 don't read anything from the address, and treat it as opaque; it's
274 the address itself that we assume is unique per-thread. */
275
276 static int
277 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
278 {
279 #ifdef __x86_64__
280 int use_64bit = is_64bit_tdesc ();
281
282 if (use_64bit)
283 {
284 void *base;
285 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
286 {
287 *addr = (CORE_ADDR) (uintptr_t) base;
288 return 0;
289 }
290
291 return -1;
292 }
293 #endif
294
295 {
296 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
297 struct thread_info *thr = get_lwp_thread (lwp);
298 struct regcache *regcache = get_thread_regcache (thr, 1);
299 unsigned int desc[4];
300 ULONGEST gs = 0;
301     const int reg_thread_area = 3; /* Bits (RPL and TI) to shift off the GS selector to get the GDT entry index.  */
302 int idx;
303
304 collect_register_by_name (regcache, "gs", &gs);
305
306 idx = gs >> reg_thread_area;
307
308 if (ptrace (PTRACE_GET_THREAD_AREA,
309 lwpid_of (thr),
310 (void *) (long) idx, (unsigned long) &desc) < 0)
311 return -1;
312
313 *addr = desc[1];
314 return 0;
315 }
316 }
317
318
319 \f
320 bool
321 x86_target::low_cannot_store_register (int regno)
322 {
323 #ifdef __x86_64__
324 if (is_64bit_tdesc ())
325 return false;
326 #endif
327
328 return regno >= I386_NUM_REGS;
329 }
330
331 bool
332 x86_target::low_cannot_fetch_register (int regno)
333 {
334 #ifdef __x86_64__
335 if (is_64bit_tdesc ())
336 return false;
337 #endif
338
339 return regno >= I386_NUM_REGS;
340 }
341
342 static void
343 x86_fill_gregset (struct regcache *regcache, void *buf)
344 {
345 int i;
346
347 #ifdef __x86_64__
348 if (register_size (regcache->tdesc, 0) == 8)
349 {
350 for (i = 0; i < X86_64_NUM_REGS; i++)
351 if (x86_64_regmap[i] != -1)
352 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
353
354 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
355 {
356 unsigned long base;
357 int lwpid = lwpid_of (current_thread);
358
359 collect_register_by_name (regcache, "fs_base", &base);
360 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
361
362 collect_register_by_name (regcache, "gs_base", &base);
363 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
364 }
365 #endif
366
367 return;
368 }
369
370 /* 32-bit inferior registers need to be zero-extended.
371 Callers would read uninitialized memory otherwise. */
372 memset (buf, 0x00, X86_64_USER_REGS * 8);
373 #endif
374
375 for (i = 0; i < I386_NUM_REGS; i++)
376 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
377
378 collect_register_by_name (regcache, "orig_eax",
379 ((char *) buf) + ORIG_EAX * REGSIZE);
380
381 #ifdef __x86_64__
382 /* Sign extend EAX value to avoid potential syscall restart
383 problems.
384
385 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
386 for a detailed explanation. */
387 if (register_size (regcache->tdesc, 0) == 4)
388 {
389 void *ptr = ((gdb_byte *) buf
390 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
391
392 *(int64_t *) ptr = *(int32_t *) ptr;
393 }
394 #endif
395 }
396
397 static void
398 x86_store_gregset (struct regcache *regcache, const void *buf)
399 {
400 int i;
401
402 #ifdef __x86_64__
403 if (register_size (regcache->tdesc, 0) == 8)
404 {
405 for (i = 0; i < X86_64_NUM_REGS; i++)
406 if (x86_64_regmap[i] != -1)
407 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
408
409 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
410 {
411 unsigned long base;
412 int lwpid = lwpid_of (current_thread);
413
414 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
415 supply_register_by_name (regcache, "fs_base", &base);
416
417 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
418 supply_register_by_name (regcache, "gs_base", &base);
419 }
420 #endif
421 return;
422 }
423 #endif
424
425 for (i = 0; i < I386_NUM_REGS; i++)
426 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
427
428 supply_register_by_name (regcache, "orig_eax",
429 ((char *) buf) + ORIG_EAX * REGSIZE);
430 }
431
432 static void
433 x86_fill_fpregset (struct regcache *regcache, void *buf)
434 {
435 #ifdef __x86_64__
436 i387_cache_to_fxsave (regcache, buf);
437 #else
438 i387_cache_to_fsave (regcache, buf);
439 #endif
440 }
441
442 static void
443 x86_store_fpregset (struct regcache *regcache, const void *buf)
444 {
445 #ifdef __x86_64__
446 i387_fxsave_to_cache (regcache, buf);
447 #else
448 i387_fsave_to_cache (regcache, buf);
449 #endif
450 }
451
452 #ifndef __x86_64__
453
454 static void
455 x86_fill_fpxregset (struct regcache *regcache, void *buf)
456 {
457 i387_cache_to_fxsave (regcache, buf);
458 }
459
460 static void
461 x86_store_fpxregset (struct regcache *regcache, const void *buf)
462 {
463 i387_fxsave_to_cache (regcache, buf);
464 }
465
466 #endif
467
468 static void
469 x86_fill_xstateregset (struct regcache *regcache, void *buf)
470 {
471 i387_cache_to_xsave (regcache, buf);
472 }
473
474 static void
475 x86_store_xstateregset (struct regcache *regcache, const void *buf)
476 {
477 i387_xsave_to_cache (regcache, buf);
478 }
479
480 /* ??? The non-biarch i386 case stores all the i387 regs twice.
481 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
482 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
483    doesn't work.  It would be nice to avoid the duplication in the case where it
484    does work.  Maybe the arch_setup routine could check whether it works
485 and update the supported regsets accordingly. */
486
487 static struct regset_info x86_regsets[] =
488 {
489 #ifdef HAVE_PTRACE_GETREGS
490 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
491 GENERAL_REGS,
492 x86_fill_gregset, x86_store_gregset },
493 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
494 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
495 # ifndef __x86_64__
496 # ifdef HAVE_PTRACE_GETFPXREGS
497 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
498 EXTENDED_REGS,
499 x86_fill_fpxregset, x86_store_fpxregset },
500 # endif
501 # endif
502 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
503 FP_REGS,
504 x86_fill_fpregset, x86_store_fpregset },
505 #endif /* HAVE_PTRACE_GETREGS */
506 NULL_REGSET
507 };
508
509 bool
510 x86_target::low_supports_breakpoints ()
511 {
512 return true;
513 }
514
515 CORE_ADDR
516 x86_target::low_get_pc (regcache *regcache)
517 {
518 int use_64bit = register_size (regcache->tdesc, 0) == 8;
519
520 if (use_64bit)
521 {
522 uint64_t pc;
523
524 collect_register_by_name (regcache, "rip", &pc);
525 return (CORE_ADDR) pc;
526 }
527 else
528 {
529 uint32_t pc;
530
531 collect_register_by_name (regcache, "eip", &pc);
532 return (CORE_ADDR) pc;
533 }
534 }
535
536 void
537 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
538 {
539 int use_64bit = register_size (regcache->tdesc, 0) == 8;
540
541 if (use_64bit)
542 {
543 uint64_t newpc = pc;
544
545 supply_register_by_name (regcache, "rip", &newpc);
546 }
547 else
548 {
549 uint32_t newpc = pc;
550
551 supply_register_by_name (regcache, "eip", &newpc);
552 }
553 }
554
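/* The x86 breakpoint instruction (int3) is one byte long, and the trap
   leaves the PC pointing just past it, so report a PC adjustment of 1.  */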
555 int
556 x86_target::low_decr_pc_after_break ()
557 {
558 return 1;
559 }
560
561 \f
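/* 0xCC is the single-byte "int3" breakpoint instruction.  */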
562 static const gdb_byte x86_breakpoint[] = { 0xCC };
563 #define x86_breakpoint_len 1
564
565 bool
566 x86_target::low_breakpoint_at (CORE_ADDR pc)
567 {
568 unsigned char c;
569
570 read_memory (pc, &c, 1);
571 if (c == 0xCC)
572 return true;
573
574 return false;
575 }
576 \f
577 /* Low-level function vector. */
578 struct x86_dr_low_type x86_dr_low =
579 {
580 x86_linux_dr_set_control,
581 x86_linux_dr_set_addr,
582 x86_linux_dr_get_addr,
583 x86_linux_dr_get_status,
584 x86_linux_dr_get_control,
585 sizeof (void *),
586 };
587 \f
588 /* Breakpoint/Watchpoint support. */
589
590 static int
591 x86_supports_z_point_type (char z_type)
592 {
593 switch (z_type)
594 {
595 case Z_PACKET_SW_BP:
596 case Z_PACKET_HW_BP:
597 case Z_PACKET_WRITE_WP:
598 case Z_PACKET_ACCESS_WP:
599 return 1;
600 default:
601 return 0;
602 }
603 }
604
605 static int
606 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
607 int size, struct raw_breakpoint *bp)
608 {
609 struct process_info *proc = current_process ();
610
611 switch (type)
612 {
613 case raw_bkpt_type_hw:
614 case raw_bkpt_type_write_wp:
615 case raw_bkpt_type_access_wp:
616 {
617 enum target_hw_bp_type hw_type
618 = raw_bkpt_type_to_target_hw_bp_type (type);
619 struct x86_debug_reg_state *state
620 = &proc->priv->arch_private->debug_reg_state;
621
622 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
623 }
624
625 default:
626 /* Unsupported. */
627 return 1;
628 }
629 }
630
631 static int
632 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
633 int size, struct raw_breakpoint *bp)
634 {
635 struct process_info *proc = current_process ();
636
637 switch (type)
638 {
639 case raw_bkpt_type_hw:
640 case raw_bkpt_type_write_wp:
641 case raw_bkpt_type_access_wp:
642 {
643 enum target_hw_bp_type hw_type
644 = raw_bkpt_type_to_target_hw_bp_type (type);
645 struct x86_debug_reg_state *state
646 = &proc->priv->arch_private->debug_reg_state;
647
648 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
649 }
650 default:
651 /* Unsupported. */
652 return 1;
653 }
654 }
655
656 static int
657 x86_stopped_by_watchpoint (void)
658 {
659 struct process_info *proc = current_process ();
660 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
661 }
662
663 static CORE_ADDR
664 x86_stopped_data_address (void)
665 {
666 struct process_info *proc = current_process ();
667 CORE_ADDR addr;
668 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
669 &addr))
670 return addr;
671 return 0;
672 }
673 \f
674 /* Called when a new process is created. */
675
676 static struct arch_process_info *
677 x86_linux_new_process (void)
678 {
679 struct arch_process_info *info = XCNEW (struct arch_process_info);
680
681 x86_low_init_dregs (&info->debug_reg_state);
682
683 return info;
684 }
685
686 /* Called when a process is being deleted. */
687
688 static void
689 x86_linux_delete_process (struct arch_process_info *info)
690 {
691 xfree (info);
692 }
693
694 /* Target routine for linux_new_fork. */
695
696 static void
697 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
698 {
699 /* These are allocated by linux_add_process. */
700 gdb_assert (parent->priv != NULL
701 && parent->priv->arch_private != NULL);
702 gdb_assert (child->priv != NULL
703 && child->priv->arch_private != NULL);
704
705   /* Linux kernels before the 2.6.33 commit
706      72f674d203cd230426437cdcf7dd6f681dad8b0d
707      inherit hardware debug registers from the parent
708      on fork/vfork/clone.  Newer Linux kernels create such tasks with
709      zeroed debug registers.
710
711      GDB core assumes the child inherits the watchpoints/hw
712      breakpoints of the parent, and will remove them all from the
713      forked off process.  Copy the debug register mirrors into the
714      new process so that all breakpoints and watchpoints can be
715      removed together.  The debug register mirrors will end up zeroed
716      before the forked off process is detached, thus making this
717      compatible with older Linux kernels too.  */
718
719 *child->priv->arch_private = *parent->priv->arch_private;
720 }
721
722 /* See nat/x86-dregs.h. */
723
724 struct x86_debug_reg_state *
725 x86_debug_reg_state (pid_t pid)
726 {
727 struct process_info *proc = find_process_pid (pid);
728
729 return &proc->priv->arch_private->debug_reg_state;
730 }
731 \f
732 /* When GDBSERVER is built as a 64-bit application on linux, the
733 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
734 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
735 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
736 conversion in-place ourselves. */
737
738 /* Convert a ptrace/host siginfo object into/from the siginfo in the
739    layout of the inferior's architecture.  Returns true if any
740 conversion was done; false otherwise. If DIRECTION is 1, then copy
741 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
742 INF. */
743
744 static int
745 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
746 {
747 #ifdef __x86_64__
748 unsigned int machine;
749 int tid = lwpid_of (current_thread);
750 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
751
752 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
753 if (!is_64bit_tdesc ())
754 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
755 FIXUP_32);
756 /* No fixup for native x32 GDB. */
757 else if (!is_elf64 && sizeof (void *) == 8)
758 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
759 FIXUP_X32);
760 #endif
761
762 return 0;
763 }
764 \f
765 static int use_xml;
766
767 /* Format of XSAVE extended state is:
768 struct
769 {
770 fxsave_bytes[0..463]
771 sw_usable_bytes[464..511]
772 xstate_hdr_bytes[512..575]
773 avx_bytes[576..831]
774 future_state etc
775 };
776
777 Same memory layout will be used for the coredump NT_X86_XSTATE
778 representing the XSAVE extended state registers.
779
780    The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
781 extended state mask, which is the same as the extended control register
782 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
783 together with the mask saved in the xstate_hdr_bytes to determine what
784 states the processor/OS supports and what state, used or initialized,
785 the process/thread is in. */
786 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
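/* For example, after reading the xstate regset into a uint64_t buffer with
   PTRACE_GETREGSET/NT_X86_XSTATE (as x86_linux_read_description does below),
   XCR0 is simply:

     uint64_t xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
*/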
787
788 /* Does the current host support the GETFPXREGS request? The header
789 file may or may not define it, and even if it is defined, the
790 kernel will return EIO if it's running on a pre-SSE processor. */
791 int have_ptrace_getfpxregs =
792 #ifdef HAVE_PTRACE_GETFPXREGS
793 -1
794 #else
795 0
796 #endif
797 ;
798
799 /* Get Linux/x86 target description from running target. */
800
801 static const struct target_desc *
802 x86_linux_read_description (void)
803 {
804 unsigned int machine;
805 int is_elf64;
806 int xcr0_features;
807 int tid;
808 static uint64_t xcr0;
809 struct regset_info *regset;
810
811 tid = lwpid_of (current_thread);
812
813 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
814
815 if (sizeof (void *) == 4)
816 {
817 if (is_elf64 > 0)
818 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
819 #ifndef __x86_64__
820 else if (machine == EM_X86_64)
821 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
822 #endif
823 }
824
825 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
826 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
827 {
828 elf_fpxregset_t fpxregs;
829
830 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
831 {
832 have_ptrace_getfpxregs = 0;
833 have_ptrace_getregset = 0;
834 return i386_linux_read_description (X86_XSTATE_X87);
835 }
836 else
837 have_ptrace_getfpxregs = 1;
838 }
839 #endif
840
841 if (!use_xml)
842 {
843 x86_xcr0 = X86_XSTATE_SSE_MASK;
844
845 /* Don't use XML. */
846 #ifdef __x86_64__
847 if (machine == EM_X86_64)
848 return tdesc_amd64_linux_no_xml;
849 else
850 #endif
851 return tdesc_i386_linux_no_xml;
852 }
853
854 if (have_ptrace_getregset == -1)
855 {
856 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
857 struct iovec iov;
858
859 iov.iov_base = xstateregs;
860 iov.iov_len = sizeof (xstateregs);
861
862 /* Check if PTRACE_GETREGSET works. */
863 if (ptrace (PTRACE_GETREGSET, tid,
864 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
865 have_ptrace_getregset = 0;
866 else
867 {
868 have_ptrace_getregset = 1;
869
870 /* Get XCR0 from XSAVE extended state. */
871 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
872 / sizeof (uint64_t))];
873
874 /* Use PTRACE_GETREGSET if it is available. */
875 for (regset = x86_regsets;
876 regset->fill_function != NULL; regset++)
877 if (regset->get_request == PTRACE_GETREGSET)
878 regset->size = X86_XSTATE_SIZE (xcr0);
879 else if (regset->type != GENERAL_REGS)
880 regset->size = 0;
881 }
882 }
883
884 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
885 xcr0_features = (have_ptrace_getregset
886 && (xcr0 & X86_XSTATE_ALL_MASK));
887
888 if (xcr0_features)
889 x86_xcr0 = xcr0;
890
891 if (machine == EM_X86_64)
892 {
893 #ifdef __x86_64__
894 const target_desc *tdesc = NULL;
895
896 if (xcr0_features)
897 {
898 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
899 !is_elf64);
900 }
901
902 if (tdesc == NULL)
903 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
904 return tdesc;
905 #endif
906 }
907 else
908 {
909 const target_desc *tdesc = NULL;
910
911 if (xcr0_features)
912 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
913
914 if (tdesc == NULL)
915 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
916
917 return tdesc;
918 }
919
920 gdb_assert_not_reached ("failed to return tdesc");
921 }
922
923 /* Update the target description of all processes; a new GDB has
924    connected, and it may or may not support xml target descriptions.  */
925
926 void
927 x86_target::update_xmltarget ()
928 {
929 struct thread_info *saved_thread = current_thread;
930
931 /* Before changing the register cache's internal layout, flush the
932 contents of the current valid caches back to the threads, and
933 release the current regcache objects. */
934 regcache_release ();
935
936 for_each_process ([this] (process_info *proc) {
937 int pid = proc->pid;
938
939 /* Look up any thread of this process. */
940 current_thread = find_any_thread_of_pid (pid);
941
942 low_arch_setup ();
943 });
944
945 current_thread = saved_thread;
946 }
947
948 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
949 PTRACE_GETREGSET. */
950
951 static void
952 x86_linux_process_qsupported (char **features, int count)
953 {
954 int i;
955
956   /* Assume GDB doesn't support XML unless it tells us otherwise.  If GDB
957      sends "xmlRegisters=" with "i386" in its qSupported query, it supports
958      x86 XML target descriptions.  */
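  /* For example, a qSupported query containing "xmlRegisters=i386,arm" sets
     USE_XML below; unrecognized architecture names in the list are simply
     ignored.  */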
959 use_xml = 0;
960 for (i = 0; i < count; i++)
961 {
962 const char *feature = features[i];
963
964 if (startswith (feature, "xmlRegisters="))
965 {
966 char *copy = xstrdup (feature + 13);
967
968 char *saveptr;
969 for (char *p = strtok_r (copy, ",", &saveptr);
970 p != NULL;
971 p = strtok_r (NULL, ",", &saveptr))
972 {
973 if (strcmp (p, "i386") == 0)
974 {
975 use_xml = 1;
976 break;
977 }
978 }
979
980 free (copy);
981 }
982 }
983 the_x86_target.update_xmltarget ();
984 }
985
986 /* Common for x86/x86-64. */
987
988 static struct regsets_info x86_regsets_info =
989 {
990 x86_regsets, /* regsets */
991 0, /* num_regsets */
992 NULL, /* disabled_regsets */
993 };
994
995 #ifdef __x86_64__
996 static struct regs_info amd64_linux_regs_info =
997 {
998 NULL, /* regset_bitmap */
999 NULL, /* usrregs_info */
1000 &x86_regsets_info
1001 };
1002 #endif
1003 static struct usrregs_info i386_linux_usrregs_info =
1004 {
1005 I386_NUM_REGS,
1006 i386_regmap,
1007 };
1008
1009 static struct regs_info i386_linux_regs_info =
1010 {
1011 NULL, /* regset_bitmap */
1012 &i386_linux_usrregs_info,
1013 &x86_regsets_info
1014 };
1015
1016 const regs_info *
1017 x86_target::get_regs_info ()
1018 {
1019 #ifdef __x86_64__
1020 if (is_64bit_tdesc ())
1021 return &amd64_linux_regs_info;
1022 else
1023 #endif
1024 return &i386_linux_regs_info;
1025 }
1026
1027 /* Initialize the target description for the architecture of the
1028 inferior. */
1029
1030 void
1031 x86_target::low_arch_setup ()
1032 {
1033 current_process ()->tdesc = x86_linux_read_description ();
1034 }
1035
1036 /* Fill *SYSNO with the number of the syscall that was trapped.  This
1037    should only be called if the LWP got a SYSCALL_SIGTRAP.  */
1038
1039 static void
1040 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1041 {
1042 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1043
1044 if (use_64bit)
1045 {
1046 long l_sysno;
1047
1048 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1049 *sysno = (int) l_sysno;
1050 }
1051 else
1052 collect_register_by_name (regcache, "orig_eax", sysno);
1053 }
1054
1055 static int
1056 x86_supports_tracepoints (void)
1057 {
1058 return 1;
1059 }
1060
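/* Write the LEN bytes in BUF to the inferior at *TO, then advance *TO past
   the bytes just written.  */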
1061 static void
1062 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1063 {
1064 target_write_memory (*to, buf, len);
1065 *to += len;
1066 }
1067
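/* Append the machine-code bytes encoded by the hex string OP to BUF and
   return the number of bytes written.  For example,
   push_opcode (buf, "48 83 ec 18") stores the four bytes
   0x48 0x83 0xec 0x18 (sub $0x18,%rsp) and returns 4.  */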
1068 static int
1069 push_opcode (unsigned char *buf, const char *op)
1070 {
1071 unsigned char *buf_org = buf;
1072
1073 while (1)
1074 {
1075 char *endptr;
1076 unsigned long ul = strtoul (op, &endptr, 16);
1077
1078 if (endptr == op)
1079 break;
1080
1081 *buf++ = ul;
1082 op = endptr;
1083 }
1084
1085 return buf - buf_org;
1086 }
1087
1088 #ifdef __x86_64__
1089
1090 /* Build a jump pad that saves registers and calls a collection
1091    function.  Writes the jump instruction that jumps to the jump pad into
1092    JJUMP_PAD_INSN.  The caller is responsible for writing it at the
1093    tracepoint address.  */
1094
1095 static int
1096 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1097 CORE_ADDR collector,
1098 CORE_ADDR lockaddr,
1099 ULONGEST orig_size,
1100 CORE_ADDR *jump_entry,
1101 CORE_ADDR *trampoline,
1102 ULONGEST *trampoline_size,
1103 unsigned char *jjump_pad_insn,
1104 ULONGEST *jjump_pad_insn_size,
1105 CORE_ADDR *adjusted_insn_addr,
1106 CORE_ADDR *adjusted_insn_addr_end,
1107 char *err)
1108 {
1109 unsigned char buf[40];
1110 int i, offset;
1111 int64_t loffset;
1112
1113 CORE_ADDR buildaddr = *jump_entry;
1114
1115 /* Build the jump pad. */
1116
1117 /* First, do tracepoint data collection. Save registers. */
1118 i = 0;
1119 /* Need to ensure stack pointer saved first. */
1120 buf[i++] = 0x54; /* push %rsp */
1121 buf[i++] = 0x55; /* push %rbp */
1122 buf[i++] = 0x57; /* push %rdi */
1123 buf[i++] = 0x56; /* push %rsi */
1124 buf[i++] = 0x52; /* push %rdx */
1125 buf[i++] = 0x51; /* push %rcx */
1126 buf[i++] = 0x53; /* push %rbx */
1127 buf[i++] = 0x50; /* push %rax */
1128 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1129 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1130 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1131 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1132 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1133 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1134 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1135 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1136 buf[i++] = 0x9c; /* pushfq */
1137 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1138 buf[i++] = 0xbf;
1139 memcpy (buf + i, &tpaddr, 8);
1140 i += 8;
1141 buf[i++] = 0x57; /* push %rdi */
1142 append_insns (&buildaddr, i, buf);
1143
1144 /* Stack space for the collecting_t object. */
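  /* The collecting_t object is two 8-byte words: the tracepoint address and
     a per-thread identifier (here the thread area read from %fs:0x0), used
     to tell which thread is currently collecting.  */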
1145 i = 0;
1146 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1147 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1148 memcpy (buf + i, &tpoint, 8);
1149 i += 8;
1150 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1151 i += push_opcode (&buf[i],
1152 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1153 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1154 append_insns (&buildaddr, i, buf);
1155
1156   /* Spin-lock: loop with lock cmpxchg until the word at LOCKADDR is zero, then atomically store the address of the on-stack collecting_t there.  */
1157 i = 0;
1158   i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1159 memcpy (&buf[i], (void *) &lockaddr, 8);
1160 i += 8;
1161 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1162 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1163 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1164 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1165 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1166 append_insns (&buildaddr, i, buf);
1167
1168 /* Set up the gdb_collect call. */
1169 /* At this point, (stack pointer + 0x18) is the base of our saved
1170 register block. */
1171
1172 i = 0;
1173 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1174 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1175
1176 /* tpoint address may be 64-bit wide. */
1177   i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1178 memcpy (buf + i, &tpoint, 8);
1179 i += 8;
1180 append_insns (&buildaddr, i, buf);
1181
1182   /* The collector function, being in the shared library, may be more
1183      than 31 bits away from the jump pad.  */
1184 i = 0;
1185 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1186 memcpy (buf + i, &collector, 8);
1187 i += 8;
1188 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1189 append_insns (&buildaddr, i, buf);
1190
1191 /* Clear the spin-lock. */
1192 i = 0;
1193 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1194 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1195 memcpy (buf + i, &lockaddr, 8);
1196 i += 8;
1197 append_insns (&buildaddr, i, buf);
1198
1199 /* Remove stack that had been used for the collect_t object. */
1200 i = 0;
1201 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1202 append_insns (&buildaddr, i, buf);
1203
1204 /* Restore register state. */
1205 i = 0;
1206 buf[i++] = 0x48; /* add $0x8,%rsp */
1207 buf[i++] = 0x83;
1208 buf[i++] = 0xc4;
1209 buf[i++] = 0x08;
1210 buf[i++] = 0x9d; /* popfq */
1211 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1212 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1213 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1214 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1215 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1216 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1217 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1218 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1219 buf[i++] = 0x58; /* pop %rax */
1220 buf[i++] = 0x5b; /* pop %rbx */
1221 buf[i++] = 0x59; /* pop %rcx */
1222 buf[i++] = 0x5a; /* pop %rdx */
1223 buf[i++] = 0x5e; /* pop %rsi */
1224 buf[i++] = 0x5f; /* pop %rdi */
1225 buf[i++] = 0x5d; /* pop %rbp */
1226 buf[i++] = 0x5c; /* pop %rsp */
1227 append_insns (&buildaddr, i, buf);
1228
1229 /* Now, adjust the original instruction to execute in the jump
1230 pad. */
1231 *adjusted_insn_addr = buildaddr;
1232 relocate_instruction (&buildaddr, tpaddr);
1233 *adjusted_insn_addr_end = buildaddr;
1234
1235 /* Finally, write a jump back to the program. */
1236
1237 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1238 if (loffset > INT_MAX || loffset < INT_MIN)
1239 {
1240 sprintf (err,
1241 "E.Jump back from jump pad too far from tracepoint "
1242 "(offset 0x%" PRIx64 " > int32).", loffset);
1243 return 1;
1244 }
1245
1246 offset = (int) loffset;
1247 memcpy (buf, jump_insn, sizeof (jump_insn));
1248 memcpy (buf + 1, &offset, 4);
1249 append_insns (&buildaddr, sizeof (jump_insn), buf);
1250
1251 /* The jump pad is now built. Wire in a jump to our jump pad. This
1252 is always done last (by our caller actually), so that we can
1253 install fast tracepoints with threads running. This relies on
1254 the agent's atomic write support. */
1255 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1256 if (loffset > INT_MAX || loffset < INT_MIN)
1257 {
1258 sprintf (err,
1259 "E.Jump pad too far from tracepoint "
1260 "(offset 0x%" PRIx64 " > int32).", loffset);
1261 return 1;
1262 }
1263
1264 offset = (int) loffset;
1265
1266 memcpy (buf, jump_insn, sizeof (jump_insn));
1267 memcpy (buf + 1, &offset, 4);
1268 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1269 *jjump_pad_insn_size = sizeof (jump_insn);
1270
1271 /* Return the end address of our pad. */
1272 *jump_entry = buildaddr;
1273
1274 return 0;
1275 }
1276
1277 #endif /* __x86_64__ */
1278
1279 /* Build a jump pad that saves registers and calls a collection
1280    function.  Writes the jump instruction that jumps to the jump pad into
1281    JJUMP_PAD_INSN.  The caller is responsible for writing it at the
1282    tracepoint address.  */
1283
1284 static int
1285 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1286 CORE_ADDR collector,
1287 CORE_ADDR lockaddr,
1288 ULONGEST orig_size,
1289 CORE_ADDR *jump_entry,
1290 CORE_ADDR *trampoline,
1291 ULONGEST *trampoline_size,
1292 unsigned char *jjump_pad_insn,
1293 ULONGEST *jjump_pad_insn_size,
1294 CORE_ADDR *adjusted_insn_addr,
1295 CORE_ADDR *adjusted_insn_addr_end,
1296 char *err)
1297 {
1298 unsigned char buf[0x100];
1299 int i, offset;
1300 CORE_ADDR buildaddr = *jump_entry;
1301
1302 /* Build the jump pad. */
1303
1304 /* First, do tracepoint data collection. Save registers. */
1305 i = 0;
1306 buf[i++] = 0x60; /* pushad */
1307 buf[i++] = 0x68; /* push tpaddr aka $pc */
1308 *((int *)(buf + i)) = (int) tpaddr;
1309 i += 4;
1310 buf[i++] = 0x9c; /* pushf */
1311 buf[i++] = 0x1e; /* push %ds */
1312 buf[i++] = 0x06; /* push %es */
1313 buf[i++] = 0x0f; /* push %fs */
1314 buf[i++] = 0xa0;
1315 buf[i++] = 0x0f; /* push %gs */
1316 buf[i++] = 0xa8;
1317 buf[i++] = 0x16; /* push %ss */
1318 buf[i++] = 0x0e; /* push %cs */
1319 append_insns (&buildaddr, i, buf);
1320
1321 /* Stack space for the collecting_t object. */
1322 i = 0;
1323 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1324
1325 /* Build the object. */
1326 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1327 memcpy (buf + i, &tpoint, 4);
1328 i += 4;
1329 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1330
1331 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1332 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1333 append_insns (&buildaddr, i, buf);
1334
1335   /* Spin-lock.  Note this uses cmpxchg, which is not available on the
1336      original i386.  If we cared about that, this could use xchg instead.  */
1337
1338 i = 0;
1339 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1340 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1341 %esp,<lockaddr> */
1342 memcpy (&buf[i], (void *) &lockaddr, 4);
1343 i += 4;
1344 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1345 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1346 append_insns (&buildaddr, i, buf);
1347
1348
1349 /* Set up arguments to the gdb_collect call. */
1350 i = 0;
1351 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1352 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1353 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1354 append_insns (&buildaddr, i, buf);
1355
1356 i = 0;
1357 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1358 append_insns (&buildaddr, i, buf);
1359
1360 i = 0;
1361 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1362 memcpy (&buf[i], (void *) &tpoint, 4);
1363 i += 4;
1364 append_insns (&buildaddr, i, buf);
1365
1366 buf[0] = 0xe8; /* call <reladdr> */
1367 offset = collector - (buildaddr + sizeof (jump_insn));
1368 memcpy (buf + 1, &offset, 4);
1369 append_insns (&buildaddr, 5, buf);
1370 /* Clean up after the call. */
1371 buf[0] = 0x83; /* add $0x8,%esp */
1372 buf[1] = 0xc4;
1373 buf[2] = 0x08;
1374 append_insns (&buildaddr, 3, buf);
1375
1376
1377 /* Clear the spin-lock. This would need the LOCK prefix on older
1378 broken archs. */
1379 i = 0;
1380 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1381 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1382 memcpy (buf + i, &lockaddr, 4);
1383 i += 4;
1384 append_insns (&buildaddr, i, buf);
1385
1386
1387 /* Remove stack that had been used for the collect_t object. */
1388 i = 0;
1389 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1390 append_insns (&buildaddr, i, buf);
1391
1392 i = 0;
1393 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1394 buf[i++] = 0xc4;
1395 buf[i++] = 0x04;
1396 buf[i++] = 0x17; /* pop %ss */
1397 buf[i++] = 0x0f; /* pop %gs */
1398 buf[i++] = 0xa9;
1399 buf[i++] = 0x0f; /* pop %fs */
1400 buf[i++] = 0xa1;
1401 buf[i++] = 0x07; /* pop %es */
1402 buf[i++] = 0x1f; /* pop %ds */
1403 buf[i++] = 0x9d; /* popf */
1404 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1405 buf[i++] = 0xc4;
1406 buf[i++] = 0x04;
1407 buf[i++] = 0x61; /* popad */
1408 append_insns (&buildaddr, i, buf);
1409
1410 /* Now, adjust the original instruction to execute in the jump
1411 pad. */
1412 *adjusted_insn_addr = buildaddr;
1413 relocate_instruction (&buildaddr, tpaddr);
1414 *adjusted_insn_addr_end = buildaddr;
1415
1416 /* Write the jump back to the program. */
1417 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1418 memcpy (buf, jump_insn, sizeof (jump_insn));
1419 memcpy (buf + 1, &offset, 4);
1420 append_insns (&buildaddr, sizeof (jump_insn), buf);
1421
1422 /* The jump pad is now built. Wire in a jump to our jump pad. This
1423 is always done last (by our caller actually), so that we can
1424 install fast tracepoints with threads running. This relies on
1425 the agent's atomic write support. */
1426 if (orig_size == 4)
1427 {
1428 /* Create a trampoline. */
1429 *trampoline_size = sizeof (jump_insn);
1430 if (!claim_trampoline_space (*trampoline_size, trampoline))
1431 {
1432 /* No trampoline space available. */
1433 strcpy (err,
1434 "E.Cannot allocate trampoline space needed for fast "
1435 "tracepoints on 4-byte instructions.");
1436 return 1;
1437 }
1438
1439 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1440 memcpy (buf, jump_insn, sizeof (jump_insn));
1441 memcpy (buf + 1, &offset, 4);
1442 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1443
1444 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1445 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1446 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1447 memcpy (buf + 2, &offset, 2);
1448 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1449 *jjump_pad_insn_size = sizeof (small_jump_insn);
1450 }
1451 else
1452 {
1453 /* Else use a 32-bit relative jump instruction. */
1454 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1455 memcpy (buf, jump_insn, sizeof (jump_insn));
1456 memcpy (buf + 1, &offset, 4);
1457 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1458 *jjump_pad_insn_size = sizeof (jump_insn);
1459 }
1460
1461 /* Return the end address of our pad. */
1462 *jump_entry = buildaddr;
1463
1464 return 0;
1465 }
1466
1467 static int
1468 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1469 CORE_ADDR collector,
1470 CORE_ADDR lockaddr,
1471 ULONGEST orig_size,
1472 CORE_ADDR *jump_entry,
1473 CORE_ADDR *trampoline,
1474 ULONGEST *trampoline_size,
1475 unsigned char *jjump_pad_insn,
1476 ULONGEST *jjump_pad_insn_size,
1477 CORE_ADDR *adjusted_insn_addr,
1478 CORE_ADDR *adjusted_insn_addr_end,
1479 char *err)
1480 {
1481 #ifdef __x86_64__
1482 if (is_64bit_tdesc ())
1483 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1484 collector, lockaddr,
1485 orig_size, jump_entry,
1486 trampoline, trampoline_size,
1487 jjump_pad_insn,
1488 jjump_pad_insn_size,
1489 adjusted_insn_addr,
1490 adjusted_insn_addr_end,
1491 err);
1492 #endif
1493
1494 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1495 collector, lockaddr,
1496 orig_size, jump_entry,
1497 trampoline, trampoline_size,
1498 jjump_pad_insn,
1499 jjump_pad_insn_size,
1500 adjusted_insn_addr,
1501 adjusted_insn_addr_end,
1502 err);
1503 }
1504
1505 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1506 architectures. */
1507
1508 static int
1509 x86_get_min_fast_tracepoint_insn_len (void)
1510 {
1511 static int warned_about_fast_tracepoints = 0;
1512
1513 #ifdef __x86_64__
1514 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1515 used for fast tracepoints. */
1516 if (is_64bit_tdesc ())
1517 return 5;
1518 #endif
1519
1520 if (agent_loaded_p ())
1521 {
1522 char errbuf[IPA_BUFSIZ];
1523
1524 errbuf[0] = '\0';
1525
1526 /* On x86, if trampolines are available, then 4-byte jump instructions
1527 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1528 with a 4-byte offset are used instead. */
1529 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1530 return 4;
1531 else
1532 {
1533 	  /* GDB has no channel to explain to the user why a shorter fast
1534 tracepoint is not possible, but at least make GDBserver
1535 mention that something has gone awry. */
1536 if (!warned_about_fast_tracepoints)
1537 {
1538 warning ("4-byte fast tracepoints not available; %s", errbuf);
1539 warned_about_fast_tracepoints = 1;
1540 }
1541 return 5;
1542 }
1543 }
1544 else
1545 {
1546 /* Indicate that the minimum length is currently unknown since the IPA
1547 has not loaded yet. */
1548 return 0;
1549 }
1550 }
1551
1552 static void
1553 add_insns (unsigned char *start, int len)
1554 {
1555 CORE_ADDR buildaddr = current_insn_ptr;
1556
1557 if (debug_threads)
1558 debug_printf ("Adding %d bytes of insn at %s\n",
1559 len, paddress (buildaddr));
1560
1561 append_insns (&buildaddr, len, start);
1562 current_insn_ptr = buildaddr;
1563 }
1564
1565 /* Our general strategy for emitting code is to avoid specifying raw
1566 bytes whenever possible, and instead copy a block of inline asm
1567 that is embedded in the function. This is a little messy, because
1568 we need to keep the compiler from discarding what looks like dead
1569 code, plus suppress various warnings. */
1570
1571 #define EMIT_ASM(NAME, INSNS) \
1572 do \
1573 { \
1574 extern unsigned char start_ ## NAME, end_ ## NAME; \
1575 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1576 __asm__ ("jmp end_" #NAME "\n" \
1577 "\t" "start_" #NAME ":" \
1578 "\t" INSNS "\n" \
1579 "\t" "end_" #NAME ":"); \
1580 } while (0)
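/* For example, EMIT_ASM (amd64_pop, "pop %rax") assembles the instruction
   once into this object file between the start_amd64_pop and end_amd64_pop
   labels (jumped over at run time), and add_insns copies those bytes to the
   inferior at current_insn_ptr.  */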
1581
1582 #ifdef __x86_64__
1583
1584 #define EMIT_ASM32(NAME,INSNS) \
1585 do \
1586 { \
1587 extern unsigned char start_ ## NAME, end_ ## NAME; \
1588 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1589 __asm__ (".code32\n" \
1590 "\t" "jmp end_" #NAME "\n" \
1591 "\t" "start_" #NAME ":\n" \
1592 "\t" INSNS "\n" \
1593 "\t" "end_" #NAME ":\n" \
1594 ".code64\n"); \
1595 } while (0)
1596
1597 #else
1598
1599 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1600
1601 #endif
1602
1603 #ifdef __x86_64__
1604
1605 static void
1606 amd64_emit_prologue (void)
1607 {
1608 EMIT_ASM (amd64_prologue,
1609 "pushq %rbp\n\t"
1610 "movq %rsp,%rbp\n\t"
1611 "sub $0x20,%rsp\n\t"
1612 "movq %rdi,-8(%rbp)\n\t"
1613 "movq %rsi,-16(%rbp)");
1614 }
1615
1616
1617 static void
1618 amd64_emit_epilogue (void)
1619 {
1620 EMIT_ASM (amd64_epilogue,
1621 "movq -16(%rbp),%rdi\n\t"
1622 "movq %rax,(%rdi)\n\t"
1623 "xor %rax,%rax\n\t"
1624 "leave\n\t"
1625 "ret");
1626 }
1627
1628 static void
1629 amd64_emit_add (void)
1630 {
1631 EMIT_ASM (amd64_add,
1632 "add (%rsp),%rax\n\t"
1633 "lea 0x8(%rsp),%rsp");
1634 }
1635
1636 static void
1637 amd64_emit_sub (void)
1638 {
1639 EMIT_ASM (amd64_sub,
1640 "sub %rax,(%rsp)\n\t"
1641 "pop %rax");
1642 }
1643
1644 static void
1645 amd64_emit_mul (void)
1646 {
1647 emit_error = 1;
1648 }
1649
1650 static void
1651 amd64_emit_lsh (void)
1652 {
1653 emit_error = 1;
1654 }
1655
1656 static void
1657 amd64_emit_rsh_signed (void)
1658 {
1659 emit_error = 1;
1660 }
1661
1662 static void
1663 amd64_emit_rsh_unsigned (void)
1664 {
1665 emit_error = 1;
1666 }
1667
1668 static void
1669 amd64_emit_ext (int arg)
1670 {
1671 switch (arg)
1672 {
1673 case 8:
1674 EMIT_ASM (amd64_ext_8,
1675 "cbtw\n\t"
1676 "cwtl\n\t"
1677 "cltq");
1678 break;
1679 case 16:
1680 EMIT_ASM (amd64_ext_16,
1681 "cwtl\n\t"
1682 "cltq");
1683 break;
1684 case 32:
1685 EMIT_ASM (amd64_ext_32,
1686 "cltq");
1687 break;
1688 default:
1689 emit_error = 1;
1690 }
1691 }
1692
1693 static void
1694 amd64_emit_log_not (void)
1695 {
1696 EMIT_ASM (amd64_log_not,
1697 "test %rax,%rax\n\t"
1698 "sete %cl\n\t"
1699 "movzbq %cl,%rax");
1700 }
1701
1702 static void
1703 amd64_emit_bit_and (void)
1704 {
1705 EMIT_ASM (amd64_and,
1706 "and (%rsp),%rax\n\t"
1707 "lea 0x8(%rsp),%rsp");
1708 }
1709
1710 static void
1711 amd64_emit_bit_or (void)
1712 {
1713 EMIT_ASM (amd64_or,
1714 "or (%rsp),%rax\n\t"
1715 "lea 0x8(%rsp),%rsp");
1716 }
1717
1718 static void
1719 amd64_emit_bit_xor (void)
1720 {
1721 EMIT_ASM (amd64_xor,
1722 "xor (%rsp),%rax\n\t"
1723 "lea 0x8(%rsp),%rsp");
1724 }
1725
1726 static void
1727 amd64_emit_bit_not (void)
1728 {
1729 EMIT_ASM (amd64_bit_not,
1730 "xorq $0xffffffffffffffff,%rax");
1731 }
1732
1733 static void
1734 amd64_emit_equal (void)
1735 {
1736 EMIT_ASM (amd64_equal,
1737 "cmp %rax,(%rsp)\n\t"
1738 "je .Lamd64_equal_true\n\t"
1739 "xor %rax,%rax\n\t"
1740 "jmp .Lamd64_equal_end\n\t"
1741 ".Lamd64_equal_true:\n\t"
1742 "mov $0x1,%rax\n\t"
1743 ".Lamd64_equal_end:\n\t"
1744 "lea 0x8(%rsp),%rsp");
1745 }
1746
1747 static void
1748 amd64_emit_less_signed (void)
1749 {
1750 EMIT_ASM (amd64_less_signed,
1751 "cmp %rax,(%rsp)\n\t"
1752 "jl .Lamd64_less_signed_true\n\t"
1753 "xor %rax,%rax\n\t"
1754 "jmp .Lamd64_less_signed_end\n\t"
1755 ".Lamd64_less_signed_true:\n\t"
1756 "mov $1,%rax\n\t"
1757 ".Lamd64_less_signed_end:\n\t"
1758 "lea 0x8(%rsp),%rsp");
1759 }
1760
1761 static void
1762 amd64_emit_less_unsigned (void)
1763 {
1764 EMIT_ASM (amd64_less_unsigned,
1765 "cmp %rax,(%rsp)\n\t"
1766 "jb .Lamd64_less_unsigned_true\n\t"
1767 "xor %rax,%rax\n\t"
1768 "jmp .Lamd64_less_unsigned_end\n\t"
1769 ".Lamd64_less_unsigned_true:\n\t"
1770 "mov $1,%rax\n\t"
1771 ".Lamd64_less_unsigned_end:\n\t"
1772 "lea 0x8(%rsp),%rsp");
1773 }
1774
1775 static void
1776 amd64_emit_ref (int size)
1777 {
1778 switch (size)
1779 {
1780 case 1:
1781 EMIT_ASM (amd64_ref1,
1782 "movb (%rax),%al");
1783 break;
1784 case 2:
1785 EMIT_ASM (amd64_ref2,
1786 "movw (%rax),%ax");
1787 break;
1788 case 4:
1789 EMIT_ASM (amd64_ref4,
1790 "movl (%rax),%eax");
1791 break;
1792 case 8:
1793 EMIT_ASM (amd64_ref8,
1794 "movq (%rax),%rax");
1795 break;
1796 }
1797 }
1798
1799 static void
1800 amd64_emit_if_goto (int *offset_p, int *size_p)
1801 {
1802 EMIT_ASM (amd64_if_goto,
1803 "mov %rax,%rcx\n\t"
1804 "pop %rax\n\t"
1805 "cmp $0,%rcx\n\t"
1806 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1807 if (offset_p)
1808 *offset_p = 10;
1809 if (size_p)
1810 *size_p = 4;
1811 }
1812
1813 static void
1814 amd64_emit_goto (int *offset_p, int *size_p)
1815 {
1816 EMIT_ASM (amd64_goto,
1817 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1818 if (offset_p)
1819 *offset_p = 1;
1820 if (size_p)
1821 *size_p = 4;
1822 }
1823
1824 static void
1825 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1826 {
1827 int diff = (to - (from + size));
1828 unsigned char buf[sizeof (int)];
1829
1830 if (size != 4)
1831 {
1832 emit_error = 1;
1833 return;
1834 }
1835
1836 memcpy (buf, &diff, sizeof (int));
1837 target_write_memory (from, buf, sizeof (int));
1838 }
1839
1840 static void
1841 amd64_emit_const (LONGEST num)
1842 {
1843 unsigned char buf[16];
1844 int i;
1845 CORE_ADDR buildaddr = current_insn_ptr;
1846
1847 i = 0;
1848 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1849 memcpy (&buf[i], &num, sizeof (num));
1850 i += 8;
1851 append_insns (&buildaddr, i, buf);
1852 current_insn_ptr = buildaddr;
1853 }
1854
1855 static void
1856 amd64_emit_call (CORE_ADDR fn)
1857 {
1858 unsigned char buf[16];
1859 int i;
1860 CORE_ADDR buildaddr;
1861 LONGEST offset64;
1862
1863   /* The destination function, being in the shared library, may be more
1864      than 31 bits away from the compiled code pad.  */
1865
1866 buildaddr = current_insn_ptr;
1867
1868 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1869
1870 i = 0;
1871
1872 if (offset64 > INT_MAX || offset64 < INT_MIN)
1873 {
1874       /* The offset is too large for a direct call.  Fall back to an indirect
1875 	 call through a register; use %r10, which is call-clobbered, so we
1876 	 don't have to push/pop it.  */
1877 buf[i++] = 0x48; /* mov $fn,%r10 */
1878 buf[i++] = 0xba;
1879 memcpy (buf + i, &fn, 8);
1880 i += 8;
1881 buf[i++] = 0xff; /* callq *%r10 */
1882 buf[i++] = 0xd2;
1883 }
1884 else
1885 {
1886 int offset32 = offset64; /* we know we can't overflow here. */
1887
1888 buf[i++] = 0xe8; /* call <reladdr> */
1889 memcpy (buf + i, &offset32, 4);
1890 i += 4;
1891 }
1892
1893 append_insns (&buildaddr, i, buf);
1894 current_insn_ptr = buildaddr;
1895 }
1896
1897 static void
1898 amd64_emit_reg (int reg)
1899 {
1900 unsigned char buf[16];
1901 int i;
1902 CORE_ADDR buildaddr;
1903
1904 /* Assume raw_regs is still in %rdi. */
1905 buildaddr = current_insn_ptr;
1906 i = 0;
1907 buf[i++] = 0xbe; /* mov $<n>,%esi */
1908 memcpy (&buf[i], &reg, sizeof (reg));
1909 i += 4;
1910 append_insns (&buildaddr, i, buf);
1911 current_insn_ptr = buildaddr;
1912 amd64_emit_call (get_raw_reg_func_addr ());
1913 }
1914
1915 static void
1916 amd64_emit_pop (void)
1917 {
1918 EMIT_ASM (amd64_pop,
1919 "pop %rax");
1920 }
1921
1922 static void
1923 amd64_emit_stack_flush (void)
1924 {
1925 EMIT_ASM (amd64_stack_flush,
1926 "push %rax");
1927 }
1928
1929 static void
1930 amd64_emit_zero_ext (int arg)
1931 {
1932 switch (arg)
1933 {
1934 case 8:
1935 EMIT_ASM (amd64_zero_ext_8,
1936 "and $0xff,%rax");
1937 break;
1938 case 16:
1939 EMIT_ASM (amd64_zero_ext_16,
1940 "and $0xffff,%rax");
1941 break;
1942 case 32:
1943 EMIT_ASM (amd64_zero_ext_32,
1944 "mov $0xffffffff,%rcx\n\t"
1945 "and %rcx,%rax");
1946 break;
1947 default:
1948 emit_error = 1;
1949 }
1950 }
1951
1952 static void
1953 amd64_emit_swap (void)
1954 {
1955 EMIT_ASM (amd64_swap,
1956 "mov %rax,%rcx\n\t"
1957 "pop %rax\n\t"
1958 "push %rcx");
1959 }
1960
1961 static void
1962 amd64_emit_stack_adjust (int n)
1963 {
1964 unsigned char buf[16];
1965 int i;
1966 CORE_ADDR buildaddr = current_insn_ptr;
1967
1968 i = 0;
1969 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1970 buf[i++] = 0x8d;
1971 buf[i++] = 0x64;
1972 buf[i++] = 0x24;
1973 /* This only handles adjustments up to 16, but we don't expect any more. */
1974 buf[i++] = n * 8;
1975 append_insns (&buildaddr, i, buf);
1976 current_insn_ptr = buildaddr;
1977 }
1978
1979 /* FN's prototype is `LONGEST(*fn)(int)'. */
1980
1981 static void
1982 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1983 {
1984 unsigned char buf[16];
1985 int i;
1986 CORE_ADDR buildaddr;
1987
1988 buildaddr = current_insn_ptr;
1989 i = 0;
1990 buf[i++] = 0xbf; /* movl $<n>,%edi */
1991 memcpy (&buf[i], &arg1, sizeof (arg1));
1992 i += 4;
1993 append_insns (&buildaddr, i, buf);
1994 current_insn_ptr = buildaddr;
1995 amd64_emit_call (fn);
1996 }
1997
1998 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1999
2000 static void
2001 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2002 {
2003 unsigned char buf[16];
2004 int i;
2005 CORE_ADDR buildaddr;
2006
2007 buildaddr = current_insn_ptr;
2008 i = 0;
2009 buf[i++] = 0xbf; /* movl $<n>,%edi */
2010 memcpy (&buf[i], &arg1, sizeof (arg1));
2011 i += 4;
2012 append_insns (&buildaddr, i, buf);
2013 current_insn_ptr = buildaddr;
2014 EMIT_ASM (amd64_void_call_2_a,
2015 /* Save away a copy of the stack top. */
2016 "push %rax\n\t"
2017 /* Also pass top as the second argument. */
2018 "mov %rax,%rsi");
2019 amd64_emit_call (fn);
2020 EMIT_ASM (amd64_void_call_2_b,
2021 /* Restore the stack top, %rax may have been trashed. */
2022 "pop %rax");
2023 }
2024
2025 static void
2026 amd64_emit_eq_goto (int *offset_p, int *size_p)
2027 {
2028 EMIT_ASM (amd64_eq,
2029 "cmp %rax,(%rsp)\n\t"
2030 "jne .Lamd64_eq_fallthru\n\t"
2031 "lea 0x8(%rsp),%rsp\n\t"
2032 "pop %rax\n\t"
2033 /* jmp, but don't trust the assembler to choose the right jump */
2034 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2035 ".Lamd64_eq_fallthru:\n\t"
2036 "lea 0x8(%rsp),%rsp\n\t"
2037 "pop %rax");
2038
2039 if (offset_p)
2040 *offset_p = 13;
2041 if (size_p)
2042 *size_p = 4;
2043 }
2044
2045 static void
2046 amd64_emit_ne_goto (int *offset_p, int *size_p)
2047 {
2048 EMIT_ASM (amd64_ne,
2049 "cmp %rax,(%rsp)\n\t"
2050 "je .Lamd64_ne_fallthru\n\t"
2051 "lea 0x8(%rsp),%rsp\n\t"
2052 "pop %rax\n\t"
2053 /* jmp, but don't trust the assembler to choose the right jump */
2054 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2055 ".Lamd64_ne_fallthru:\n\t"
2056 "lea 0x8(%rsp),%rsp\n\t"
2057 "pop %rax");
2058
2059 if (offset_p)
2060 *offset_p = 13;
2061 if (size_p)
2062 *size_p = 4;
2063 }
2064
2065 static void
2066 amd64_emit_lt_goto (int *offset_p, int *size_p)
2067 {
2068 EMIT_ASM (amd64_lt,
2069 "cmp %rax,(%rsp)\n\t"
2070 "jnl .Lamd64_lt_fallthru\n\t"
2071 "lea 0x8(%rsp),%rsp\n\t"
2072 "pop %rax\n\t"
2073 /* jmp, but don't trust the assembler to choose the right jump */
2074 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2075 ".Lamd64_lt_fallthru:\n\t"
2076 "lea 0x8(%rsp),%rsp\n\t"
2077 "pop %rax");
2078
2079 if (offset_p)
2080 *offset_p = 13;
2081 if (size_p)
2082 *size_p = 4;
2083 }
2084
2085 static void
2086 amd64_emit_le_goto (int *offset_p, int *size_p)
2087 {
2088 EMIT_ASM (amd64_le,
2089 "cmp %rax,(%rsp)\n\t"
2090 "jnle .Lamd64_le_fallthru\n\t"
2091 "lea 0x8(%rsp),%rsp\n\t"
2092 "pop %rax\n\t"
2093 /* jmp, but don't trust the assembler to choose the right jump */
2094 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2095 ".Lamd64_le_fallthru:\n\t"
2096 "lea 0x8(%rsp),%rsp\n\t"
2097 "pop %rax");
2098
2099 if (offset_p)
2100 *offset_p = 13;
2101 if (size_p)
2102 *size_p = 4;
2103 }
2104
2105 static void
2106 amd64_emit_gt_goto (int *offset_p, int *size_p)
2107 {
2108 EMIT_ASM (amd64_gt,
2109 "cmp %rax,(%rsp)\n\t"
2110 "jng .Lamd64_gt_fallthru\n\t"
2111 "lea 0x8(%rsp),%rsp\n\t"
2112 "pop %rax\n\t"
2113 /* jmp, but don't trust the assembler to choose the right jump */
2114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2115 ".Lamd64_gt_fallthru:\n\t"
2116 "lea 0x8(%rsp),%rsp\n\t"
2117 "pop %rax");
2118
2119 if (offset_p)
2120 *offset_p = 13;
2121 if (size_p)
2122 *size_p = 4;
2123 }
2124
2125 static void
2126 amd64_emit_ge_goto (int *offset_p, int *size_p)
2127 {
2128 EMIT_ASM (amd64_ge,
2129 "cmp %rax,(%rsp)\n\t"
2130 "jnge .Lamd64_ge_fallthru\n\t"
2131 ".Lamd64_ge_jump:\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax\n\t"
2134 /* jmp, but don't trust the assembler to choose the right jump */
2135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2136 ".Lamd64_ge_fallthru:\n\t"
2137 "lea 0x8(%rsp),%rsp\n\t"
2138 "pop %rax");
2139
2140 if (offset_p)
2141 *offset_p = 13;
2142 if (size_p)
2143 *size_p = 4;
2144 }
2145
2146 struct emit_ops amd64_emit_ops =
2147 {
2148 amd64_emit_prologue,
2149 amd64_emit_epilogue,
2150 amd64_emit_add,
2151 amd64_emit_sub,
2152 amd64_emit_mul,
2153 amd64_emit_lsh,
2154 amd64_emit_rsh_signed,
2155 amd64_emit_rsh_unsigned,
2156 amd64_emit_ext,
2157 amd64_emit_log_not,
2158 amd64_emit_bit_and,
2159 amd64_emit_bit_or,
2160 amd64_emit_bit_xor,
2161 amd64_emit_bit_not,
2162 amd64_emit_equal,
2163 amd64_emit_less_signed,
2164 amd64_emit_less_unsigned,
2165 amd64_emit_ref,
2166 amd64_emit_if_goto,
2167 amd64_emit_goto,
2168 amd64_write_goto_address,
2169 amd64_emit_const,
2170 amd64_emit_call,
2171 amd64_emit_reg,
2172 amd64_emit_pop,
2173 amd64_emit_stack_flush,
2174 amd64_emit_zero_ext,
2175 amd64_emit_swap,
2176 amd64_emit_stack_adjust,
2177 amd64_emit_int_call_1,
2178 amd64_emit_void_call_2,
2179 amd64_emit_eq_goto,
2180 amd64_emit_ne_goto,
2181 amd64_emit_lt_goto,
2182 amd64_emit_le_goto,
2183 amd64_emit_gt_goto,
2184 amd64_emit_ge_goto
2185 };
2186
2187 #endif /* __x86_64__ */
2188
2189 static void
2190 i386_emit_prologue (void)
2191 {
2192 EMIT_ASM32 (i386_prologue,
2193 "push %ebp\n\t"
2194 "mov %esp,%ebp\n\t"
2195 "push %ebx");
2196 /* At this point, the raw regs base address is at 8(%ebp), and the
2197 value pointer is at 12(%ebp). */
2198 }
2199
2200 static void
2201 i386_emit_epilogue (void)
2202 {
2203 EMIT_ASM32 (i386_epilogue,
2204 "mov 12(%ebp),%ecx\n\t"
2205 "mov %eax,(%ecx)\n\t"
2206 "mov %ebx,0x4(%ecx)\n\t"
2207 "xor %eax,%eax\n\t"
2208 "pop %ebx\n\t"
2209 "pop %ebp\n\t"
2210 "ret");
2211 }
2212
2213 static void
2214 i386_emit_add (void)
2215 {
2216 EMIT_ASM32 (i386_add,
2217 "add (%esp),%eax\n\t"
2218 "adc 0x4(%esp),%ebx\n\t"
2219 "lea 0x8(%esp),%esp");
2220 }
2221
2222 static void
2223 i386_emit_sub (void)
2224 {
2225 EMIT_ASM32 (i386_sub,
2226 "subl %eax,(%esp)\n\t"
2227 "sbbl %ebx,4(%esp)\n\t"
2228 "pop %eax\n\t"
2229 "pop %ebx\n\t");
2230 }
2231
2232 static void
2233 i386_emit_mul (void)
2234 {
2235 emit_error = 1;
2236 }
2237
2238 static void
2239 i386_emit_lsh (void)
2240 {
2241 emit_error = 1;
2242 }
2243
2244 static void
2245 i386_emit_rsh_signed (void)
2246 {
2247 emit_error = 1;
2248 }
2249
2250 static void
2251 i386_emit_rsh_unsigned (void)
2252 {
2253 emit_error = 1;
2254 }
2255
2256 static void
2257 i386_emit_ext (int arg)
2258 {
2259 switch (arg)
2260 {
2261 case 8:
2262 EMIT_ASM32 (i386_ext_8,
2263 "cbtw\n\t"
2264 "cwtl\n\t"
2265 "movl %eax,%ebx\n\t"
2266 "sarl $31,%ebx");
2267 break;
2268 case 16:
2269 EMIT_ASM32 (i386_ext_16,
2270 "cwtl\n\t"
2271 "movl %eax,%ebx\n\t"
2272 "sarl $31,%ebx");
2273 break;
2274 case 32:
2275 EMIT_ASM32 (i386_ext_32,
2276 "movl %eax,%ebx\n\t"
2277 "sarl $31,%ebx");
2278 break;
2279 default:
2280 emit_error = 1;
2281 }
2282 }
2283
2284 static void
2285 i386_emit_log_not (void)
2286 {
2287 EMIT_ASM32 (i386_log_not,
2288 "or %ebx,%eax\n\t"
2289 "test %eax,%eax\n\t"
2290 "sete %cl\n\t"
2291 "xor %ebx,%ebx\n\t"
2292 "movzbl %cl,%eax");
2293 }
2294
2295 static void
2296 i386_emit_bit_and (void)
2297 {
2298 EMIT_ASM32 (i386_and,
2299 "and (%esp),%eax\n\t"
2300 "and 0x4(%esp),%ebx\n\t"
2301 "lea 0x8(%esp),%esp");
2302 }
2303
2304 static void
2305 i386_emit_bit_or (void)
2306 {
2307 EMIT_ASM32 (i386_or,
2308 "or (%esp),%eax\n\t"
2309 "or 0x4(%esp),%ebx\n\t"
2310 "lea 0x8(%esp),%esp");
2311 }
2312
2313 static void
2314 i386_emit_bit_xor (void)
2315 {
2316 EMIT_ASM32 (i386_xor,
2317 "xor (%esp),%eax\n\t"
2318 "xor 0x4(%esp),%ebx\n\t"
2319 "lea 0x8(%esp),%esp");
2320 }
2321
2322 static void
2323 i386_emit_bit_not (void)
2324 {
2325 EMIT_ASM32 (i386_bit_not,
2326 "xor $0xffffffff,%eax\n\t"
2327 "xor $0xffffffff,%ebx\n\t");
2328 }
2329
2330 static void
2331 i386_emit_equal (void)
2332 {
2333 EMIT_ASM32 (i386_equal,
2334 "cmpl %ebx,4(%esp)\n\t"
2335 "jne .Li386_equal_false\n\t"
2336 "cmpl %eax,(%esp)\n\t"
2337 "je .Li386_equal_true\n\t"
2338 ".Li386_equal_false:\n\t"
2339 "xor %eax,%eax\n\t"
2340 "jmp .Li386_equal_end\n\t"
2341 ".Li386_equal_true:\n\t"
2342 "mov $1,%eax\n\t"
2343 ".Li386_equal_end:\n\t"
2344 "xor %ebx,%ebx\n\t"
2345 "lea 0x8(%esp),%esp");
2346 }
2347
2348 static void
2349 i386_emit_less_signed (void)
2350 {
2351 EMIT_ASM32 (i386_less_signed,
2352 "cmpl %ebx,4(%esp)\n\t"
2353 "jl .Li386_less_signed_true\n\t"
2354 "jne .Li386_less_signed_false\n\t"
2355 "cmpl %eax,(%esp)\n\t"
2356 "jl .Li386_less_signed_true\n\t"
2357 ".Li386_less_signed_false:\n\t"
2358 "xor %eax,%eax\n\t"
2359 "jmp .Li386_less_signed_end\n\t"
2360 ".Li386_less_signed_true:\n\t"
2361 "mov $1,%eax\n\t"
2362 ".Li386_less_signed_end:\n\t"
2363 "xor %ebx,%ebx\n\t"
2364 "lea 0x8(%esp),%esp");
2365 }
2366
2367 static void
2368 i386_emit_less_unsigned (void)
2369 {
2370 EMIT_ASM32 (i386_less_unsigned,
2371 "cmpl %ebx,4(%esp)\n\t"
2372 "jb .Li386_less_unsigned_true\n\t"
2373 "jne .Li386_less_unsigned_false\n\t"
2374 "cmpl %eax,(%esp)\n\t"
2375 "jb .Li386_less_unsigned_true\n\t"
2376 ".Li386_less_unsigned_false:\n\t"
2377 "xor %eax,%eax\n\t"
2378 "jmp .Li386_less_unsigned_end\n\t"
2379 ".Li386_less_unsigned_true:\n\t"
2380 "mov $1,%eax\n\t"
2381 ".Li386_less_unsigned_end:\n\t"
2382 "xor %ebx,%ebx\n\t"
2383 "lea 0x8(%esp),%esp");
2384 }
2385
2386 static void
2387 i386_emit_ref (int size)
2388 {
2389 switch (size)
2390 {
2391 case 1:
2392 EMIT_ASM32 (i386_ref1,
2393 "movb (%eax),%al");
2394 break;
2395 case 2:
2396 EMIT_ASM32 (i386_ref2,
2397 "movw (%eax),%ax");
2398 break;
2399 case 4:
2400 EMIT_ASM32 (i386_ref4,
2401 "movl (%eax),%eax");
2402 break;
2403 case 8:
2404 EMIT_ASM32 (i386_ref8,
2405 "movl 4(%eax),%ebx\n\t"
2406 "movl (%eax),%eax");
2407 break;
2408 }
2409 }
2410
2411 static void
2412 i386_emit_if_goto (int *offset_p, int *size_p)
2413 {
2414 EMIT_ASM32 (i386_if_goto,
2415 "mov %eax,%ecx\n\t"
2416 "or %ebx,%ecx\n\t"
2417 "pop %eax\n\t"
2418 "pop %ebx\n\t"
2419 "cmpl $0,%ecx\n\t"
2420 /* Don't trust the assembler to choose the right jump */
2421 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2422
2423 if (offset_p)
2424 *offset_p = 11; /* be sure that this matches the sequence above */
2425 if (size_p)
2426 *size_p = 4;
2427 }
2428
2429 static void
2430 i386_emit_goto (int *offset_p, int *size_p)
2431 {
2432 EMIT_ASM32 (i386_goto,
2433 /* Don't trust the assembler to choose the right jump */
2434 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2435 if (offset_p)
2436 *offset_p = 1;
2437 if (size_p)
2438 *size_p = 4;
2439 }
2440
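/* Patch the 32-bit displacement at FROM so that the jump emitted
   earlier lands at TO.  Only SIZE == 4 is supported.  */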
2441 static void
2442 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2443 {
2444 int diff = (to - (from + size));
2445 unsigned char buf[sizeof (int)];
2446
2447 /* We're only doing 4-byte sizes at the moment. */
2448 if (size != 4)
2449 {
2450 emit_error = 1;
2451 return;
2452 }
2453
2454 memcpy (buf, &diff, sizeof (int));
2455 target_write_memory (from, buf, sizeof (int));
2456 }
2457
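/* Load the 64-bit constant NUM into the top-of-stack register pair,
   low word in %eax and high word in %ebx.  */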
2458 static void
2459 i386_emit_const (LONGEST num)
2460 {
2461 unsigned char buf[16];
2462 int i, hi, lo;
2463 CORE_ADDR buildaddr = current_insn_ptr;
2464
2465 i = 0;
2466 buf[i++] = 0xb8; /* mov $<n>,%eax */
2467 lo = num & 0xffffffff;
2468 memcpy (&buf[i], &lo, sizeof (lo));
2469 i += 4;
2470 hi = ((num >> 32) & 0xffffffff);
2471 if (hi)
2472 {
2473 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2474 memcpy (&buf[i], &hi, sizeof (hi));
2475 i += 4;
2476 }
2477 else
2478 {
2479 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2480 }
2481 append_insns (&buildaddr, i, buf);
2482 current_insn_ptr = buildaddr;
2483 }
2484
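/* Emit a 5-byte PC-relative call to FN at the current instruction
   pointer.  */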
2485 static void
2486 i386_emit_call (CORE_ADDR fn)
2487 {
2488 unsigned char buf[16];
2489 int i, offset;
2490 CORE_ADDR buildaddr;
2491
2492 buildaddr = current_insn_ptr;
2493 i = 0;
2494 buf[i++] = 0xe8; /* call <reladdr> */
2495 offset = ((int) fn) - (buildaddr + 5);
2496 memcpy (buf + 1, &offset, 4);
2497 append_insns (&buildaddr, 5, buf);
2498 current_insn_ptr = buildaddr;
2499 }
2500
2501 static void
2502 i386_emit_reg (int reg)
2503 {
2504 unsigned char buf[16];
2505 int i;
2506 CORE_ADDR buildaddr;
2507
2508 EMIT_ASM32 (i386_reg_a,
2509 "sub $0x8,%esp");
2510 buildaddr = current_insn_ptr;
2511 i = 0;
2512 buf[i++] = 0xb8; /* mov $<n>,%eax */
2513 memcpy (&buf[i], &reg, sizeof (reg));
2514 i += 4;
2515 append_insns (&buildaddr, i, buf);
2516 current_insn_ptr = buildaddr;
2517 EMIT_ASM32 (i386_reg_b,
2518 "mov %eax,4(%esp)\n\t"
2519 "mov 8(%ebp),%eax\n\t"
2520 "mov %eax,(%esp)");
2521 i386_emit_call (get_raw_reg_func_addr ());
2522 EMIT_ASM32 (i386_reg_c,
2523 "xor %ebx,%ebx\n\t"
2524 "lea 0x8(%esp),%esp");
2525 }
2526
2527 static void
2528 i386_emit_pop (void)
2529 {
2530 EMIT_ASM32 (i386_pop,
2531 "pop %eax\n\t"
2532 "pop %ebx");
2533 }
2534
2535 static void
2536 i386_emit_stack_flush (void)
2537 {
2538 EMIT_ASM32 (i386_stack_flush,
2539 "push %ebx\n\t"
2540 "push %eax");
2541 }
2542
2543 static void
2544 i386_emit_zero_ext (int arg)
2545 {
2546 switch (arg)
2547 {
2548 case 8:
2549 EMIT_ASM32 (i386_zero_ext_8,
2550 "and $0xff,%eax\n\t"
2551 "xor %ebx,%ebx");
2552 break;
2553 case 16:
2554 EMIT_ASM32 (i386_zero_ext_16,
2555 "and $0xffff,%eax\n\t"
2556 "xor %ebx,%ebx");
2557 break;
2558 case 32:
2559 EMIT_ASM32 (i386_zero_ext_32,
2560 "xor %ebx,%ebx");
2561 break;
2562 default:
2563 emit_error = 1;
2564 }
2565 }
2566
2567 static void
2568 i386_emit_swap (void)
2569 {
2570 EMIT_ASM32 (i386_swap,
2571 "mov %eax,%ecx\n\t"
2572 "mov %ebx,%edx\n\t"
2573 "pop %eax\n\t"
2574 "pop %ebx\n\t"
2575 "push %edx\n\t"
2576 "push %ecx");
2577 }
2578
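/* Discard N entries from the bytecode stack by emitting
   lea N*8(%esp),%esp; each entry is a pair of 32-bit words.  */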
2579 static void
2580 i386_emit_stack_adjust (int n)
2581 {
2582 unsigned char buf[16];
2583 int i;
2584 CORE_ADDR buildaddr = current_insn_ptr;
2585
2586 i = 0;
2587 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2588 buf[i++] = 0x64;
2589 buf[i++] = 0x24;
2590 buf[i++] = n * 8;
2591 append_insns (&buildaddr, i, buf);
2592 current_insn_ptr = buildaddr;
2593 }
2594
2595 /* FN's prototype is `LONGEST(*fn)(int)'. */
2596
2597 static void
2598 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2599 {
2600 unsigned char buf[16];
2601 int i;
2602 CORE_ADDR buildaddr;
2603
2604 EMIT_ASM32 (i386_int_call_1_a,
2605 /* Reserve a bit of stack space. */
2606 "sub $0x8,%esp");
2607 /* Put the one argument on the stack. */
2608 buildaddr = current_insn_ptr;
2609 i = 0;
2610 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2611 buf[i++] = 0x04;
2612 buf[i++] = 0x24;
2613 memcpy (&buf[i], &arg1, sizeof (arg1));
2614 i += 4;
2615 append_insns (&buildaddr, i, buf);
2616 current_insn_ptr = buildaddr;
2617 i386_emit_call (fn);
2618 EMIT_ASM32 (i386_int_call_1_c,
2619 "mov %edx,%ebx\n\t"
2620 "lea 0x8(%esp),%esp");
2621 }
2622
2623 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2624
2625 static void
2626 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2627 {
2628 unsigned char buf[16];
2629 int i;
2630 CORE_ADDR buildaddr;
2631
2632 EMIT_ASM32 (i386_void_call_2_a,
2633 /* Preserve %eax only; we don't have to worry about %ebx. */
2634 "push %eax\n\t"
2635 /* Reserve a bit of stack space for arguments. */
2636 "sub $0x10,%esp\n\t"
2637 /* Copy "top" to the second argument position. (Note that
2638 we can't assume the function won't scribble on its
2639 arguments, so don't try to restore from this.) */
2640 "mov %eax,4(%esp)\n\t"
2641 "mov %ebx,8(%esp)");
2642 /* Put the first argument on the stack. */
2643 buildaddr = current_insn_ptr;
2644 i = 0;
2645 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2646 buf[i++] = 0x04;
2647 buf[i++] = 0x24;
2648 memcpy (&buf[i], &arg1, sizeof (arg1));
2649 i += 4;
2650 append_insns (&buildaddr, i, buf);
2651 current_insn_ptr = buildaddr;
2652 i386_emit_call (fn);
2653 EMIT_ASM32 (i386_void_call_2_b,
2654 "lea 0x10(%esp),%esp\n\t"
2655 /* Restore original stack top. */
2656 "pop %eax");
2657 }
2658
2659
2660 static void
2661 i386_emit_eq_goto (int *offset_p, int *size_p)
2662 {
2663 EMIT_ASM32 (eq,
2664 /* Check low half first, more likely to be the decider */
2665 "cmpl %eax,(%esp)\n\t"
2666 "jne .Leq_fallthru\n\t"
2667 "cmpl %ebx,4(%esp)\n\t"
2668 "jne .Leq_fallthru\n\t"
2669 "lea 0x8(%esp),%esp\n\t"
2670 "pop %eax\n\t"
2671 "pop %ebx\n\t"
2672 /* jmp, but don't trust the assembler to choose the right jump */
2673 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2674 ".Leq_fallthru:\n\t"
2675 "lea 0x8(%esp),%esp\n\t"
2676 "pop %eax\n\t"
2677 "pop %ebx");
2678
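/* 18 is the offset of the jmp's 32-bit displacement in the sequence
   above: two cmpl/jne pairs (3+2 and 4+2 bytes), lea (4), two pops (2),
   then the 0xe9 opcode byte (1).  */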
2679 if (offset_p)
2680 *offset_p = 18;
2681 if (size_p)
2682 *size_p = 4;
2683 }
2684
2685 static void
2686 i386_emit_ne_goto (int *offset_p, int *size_p)
2687 {
2688 EMIT_ASM32 (ne,
2689 /* Check low half first, more likely to be the decider */
2690 "cmpl %eax,(%esp)\n\t"
2691 "jne .Lne_jump\n\t"
2692 "cmpl %ebx,4(%esp)\n\t"
2693 "je .Lne_fallthru\n\t"
2694 ".Lne_jump:\n\t"
2695 "lea 0x8(%esp),%esp\n\t"
2696 "pop %eax\n\t"
2697 "pop %ebx\n\t"
2698 /* jmp, but don't trust the assembler to choose the right jump */
2699 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2700 ".Lne_fallthru:\n\t"
2701 "lea 0x8(%esp),%esp\n\t"
2702 "pop %eax\n\t"
2703 "pop %ebx");
2704
2705 if (offset_p)
2706 *offset_p = 18;
2707 if (size_p)
2708 *size_p = 4;
2709 }
2710
2711 static void
2712 i386_emit_lt_goto (int *offset_p, int *size_p)
2713 {
2714 EMIT_ASM32 (lt,
2715 "cmpl %ebx,4(%esp)\n\t"
2716 "jl .Llt_jump\n\t"
2717 "jne .Llt_fallthru\n\t"
2718 "cmpl %eax,(%esp)\n\t"
2719 "jnl .Llt_fallthru\n\t"
2720 ".Llt_jump:\n\t"
2721 "lea 0x8(%esp),%esp\n\t"
2722 "pop %eax\n\t"
2723 "pop %ebx\n\t"
2724 /* jmp, but don't trust the assembler to choose the right jump */
2725 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2726 ".Llt_fallthru:\n\t"
2727 "lea 0x8(%esp),%esp\n\t"
2728 "pop %eax\n\t"
2729 "pop %ebx");
2730
2731 if (offset_p)
2732 *offset_p = 20;
2733 if (size_p)
2734 *size_p = 4;
2735 }
2736
2737 static void
2738 i386_emit_le_goto (int *offset_p, int *size_p)
2739 {
2740 EMIT_ASM32 (le,
2741 "cmpl %ebx,4(%esp)\n\t"
2742 "jle .Lle_jump\n\t"
2743 "jne .Lle_fallthru\n\t"
2744 "cmpl %eax,(%esp)\n\t"
2745 "jnle .Lle_fallthru\n\t"
2746 ".Lle_jump:\n\t"
2747 "lea 0x8(%esp),%esp\n\t"
2748 "pop %eax\n\t"
2749 "pop %ebx\n\t"
2750 /* jmp, but don't trust the assembler to choose the right jump */
2751 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2752 ".Lle_fallthru:\n\t"
2753 "lea 0x8(%esp),%esp\n\t"
2754 "pop %eax\n\t"
2755 "pop %ebx");
2756
2757 if (offset_p)
2758 *offset_p = 20;
2759 if (size_p)
2760 *size_p = 4;
2761 }
2762
2763 static void
2764 i386_emit_gt_goto (int *offset_p, int *size_p)
2765 {
2766 EMIT_ASM32 (gt,
2767 "cmpl %ebx,4(%esp)\n\t"
2768 "jg .Lgt_jump\n\t"
2769 "jne .Lgt_fallthru\n\t"
2770 "cmpl %eax,(%esp)\n\t"
2771 "jng .Lgt_fallthru\n\t"
2772 ".Lgt_jump:\n\t"
2773 "lea 0x8(%esp),%esp\n\t"
2774 "pop %eax\n\t"
2775 "pop %ebx\n\t"
2776 /* jmp, but don't trust the assembler to choose the right jump */
2777 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2778 ".Lgt_fallthru:\n\t"
2779 "lea 0x8(%esp),%esp\n\t"
2780 "pop %eax\n\t"
2781 "pop %ebx");
2782
2783 if (offset_p)
2784 *offset_p = 20;
2785 if (size_p)
2786 *size_p = 4;
2787 }
2788
2789 static void
2790 i386_emit_ge_goto (int *offset_p, int *size_p)
2791 {
2792 EMIT_ASM32 (ge,
2793 "cmpl %ebx,4(%esp)\n\t"
2794 "jge .Lge_jump\n\t"
2795 "jne .Lge_fallthru\n\t"
2796 "cmpl %eax,(%esp)\n\t"
2797 "jnge .Lge_fallthru\n\t"
2798 ".Lge_jump:\n\t"
2799 "lea 0x8(%esp),%esp\n\t"
2800 "pop %eax\n\t"
2801 "pop %ebx\n\t"
2802 /* jmp, but don't trust the assembler to choose the right jump */
2803 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2804 ".Lge_fallthru:\n\t"
2805 "lea 0x8(%esp),%esp\n\t"
2806 "pop %eax\n\t"
2807 "pop %ebx");
2808
2809 if (offset_p)
2810 *offset_p = 20;
2811 if (size_p)
2812 *size_p = 4;
2813 }
2814
2815 struct emit_ops i386_emit_ops =
2816 {
2817 i386_emit_prologue,
2818 i386_emit_epilogue,
2819 i386_emit_add,
2820 i386_emit_sub,
2821 i386_emit_mul,
2822 i386_emit_lsh,
2823 i386_emit_rsh_signed,
2824 i386_emit_rsh_unsigned,
2825 i386_emit_ext,
2826 i386_emit_log_not,
2827 i386_emit_bit_and,
2828 i386_emit_bit_or,
2829 i386_emit_bit_xor,
2830 i386_emit_bit_not,
2831 i386_emit_equal,
2832 i386_emit_less_signed,
2833 i386_emit_less_unsigned,
2834 i386_emit_ref,
2835 i386_emit_if_goto,
2836 i386_emit_goto,
2837 i386_write_goto_address,
2838 i386_emit_const,
2839 i386_emit_call,
2840 i386_emit_reg,
2841 i386_emit_pop,
2842 i386_emit_stack_flush,
2843 i386_emit_zero_ext,
2844 i386_emit_swap,
2845 i386_emit_stack_adjust,
2846 i386_emit_int_call_1,
2847 i386_emit_void_call_2,
2848 i386_emit_eq_goto,
2849 i386_emit_ne_goto,
2850 i386_emit_lt_goto,
2851 i386_emit_le_goto,
2852 i386_emit_gt_goto,
2853 i386_emit_ge_goto
2854 };
2855
2856
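/* Return the emit_ops vector matching the inferior: amd64 for a
   64-bit target description, i386 otherwise.  */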
2857 static struct emit_ops *
2858 x86_emit_ops (void)
2859 {
2860 #ifdef __x86_64__
2861 if (is_64bit_tdesc ())
2862 return &amd64_emit_ops;
2863 else
2864 #endif
2865 return &i386_emit_ops;
2866 }
2867
2868 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2869
2870 const gdb_byte *
2871 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2872 {
2873 *size = x86_breakpoint_len;
2874 return x86_breakpoint;
2875 }
2876
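/* Implementation of linux_target_ops method "supports_range_stepping".  */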
2877 static int
2878 x86_supports_range_stepping (void)
2879 {
2880 return 1;
2881 }
2882
2883 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2884 */
2885
2886 static int
2887 x86_supports_hardware_single_step (void)
2888 {
2889 return 1;
2890 }
2891
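/* Return the index of the in-process agent's target description that
   matches the current thread's register set.  */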
2892 static int
2893 x86_get_ipa_tdesc_idx (void)
2894 {
2895 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2896 const struct target_desc *tdesc = regcache->tdesc;
2897
2898 #ifdef __x86_64__
2899 return amd64_get_ipa_tdesc_idx (tdesc);
2900 #endif
2901
2902 if (tdesc == tdesc_i386_linux_no_xml)
2903 return X86_TDESC_SSE;
2904
2905 return i386_get_ipa_tdesc_idx (tdesc);
2906 }
2907
2908 /* This is initialized assuming an amd64 target.
2909 x86_arch_setup will correct it for i386 or amd64 targets. */
2910
2911 struct linux_target_ops the_low_target =
2912 {
2913 x86_supports_z_point_type,
2914 x86_insert_point,
2915 x86_remove_point,
2916 x86_stopped_by_watchpoint,
2917 x86_stopped_data_address,
2918 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2919 native i386 case (no registers smaller than an xfer unit), and are not
2920 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2921 NULL,
2922 NULL,
2923 /* need to fix up i386 siginfo if host is amd64 */
2924 x86_siginfo_fixup,
2925 x86_linux_new_process,
2926 x86_linux_delete_process,
2927 x86_linux_new_thread,
2928 x86_linux_delete_thread,
2929 x86_linux_new_fork,
2930 x86_linux_prepare_to_resume,
2931 x86_linux_process_qsupported,
2932 x86_supports_tracepoints,
2933 x86_get_thread_area,
2934 x86_install_fast_tracepoint_jump_pad,
2935 x86_emit_ops,
2936 x86_get_min_fast_tracepoint_insn_len,
2937 x86_supports_range_stepping,
2938 x86_supports_hardware_single_step,
2939 x86_get_syscall_trapinfo,
2940 x86_get_ipa_tdesc_idx,
2941 };
2942
2943 /* The linux target ops object. */
2944
2945 linux_process_target *the_linux_target = &the_x86_target;
2946
2947 void
2948 initialize_low_arch (void)
2949 {
2950 /* Initialize the Linux target descriptions. */
2951 #ifdef __x86_64__
2952 tdesc_amd64_linux_no_xml = allocate_target_description ();
2953 copy_target_description (tdesc_amd64_linux_no_xml,
2954 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2955 false));
2956 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2957 #endif
2958
2959 tdesc_i386_linux_no_xml = allocate_target_description ();
2960 copy_target_description (tdesc_i386_linux_no_xml,
2961 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2962 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2963
2964 initialize_regsets_info (&x86_regsets_info);
2965 }