1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
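/* Illustrative note: jump_insn is the 5-byte "jmp rel32" encoding
   (opcode 0xe9 followed by a 32-bit displacement) and small_jump_insn
   is the 4-byte "jmp rel16" form (operand-size prefix 0x66, opcode
   0xe9, 16-bit displacement).  The zero bytes are placeholders; the
   users below patch the displacement in before writing the
   instruction out, roughly:

     memcpy (buf, jump_insn, sizeof (jump_insn));
     memcpy (buf + 1, &offset, 4);     (fill in the rel32 operand)  */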
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
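/* Illustrative note: these arch_prctl sub-codes are passed as the
   ptrace "data" argument with PTRACE_ARCH_PRCTL, the "addr" argument
   supplying or receiving the base.  For example, reading the fs base
   of a stopped thread looks roughly like:

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   which is how ps_get_thread_area and x86_get_thread_area below use
   them.  */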
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103   /* Update the target description of all processes; a new GDB has
104      connected, and it may or may not support xml target descriptions.  */
105 void update_xmltarget ();
106
107 const regs_info *get_regs_info () override;
108
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
111 bool supports_z_point_type (char z_type) override;
112
113 protected:
114
115 void low_arch_setup () override;
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
126
127 int low_decr_pc_after_break () override;
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
144 };
145
146 /* The singleton target ops object. */
147
148 static x86_target the_x86_target;
149
150 /* Per-process arch-specific data we want to keep. */
151
152 struct arch_process_info
153 {
154 struct x86_debug_reg_state debug_reg_state;
155 };
156
157 #ifdef __x86_64__
158
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout.
161 Note that the transfer layout uses 64-bit regs. */
162 static /*const*/ int i386_regmap[] =
163 {
164 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
165 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
166 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
167 DS * 8, ES * 8, FS * 8, GS * 8
168 };
169
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
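/* Illustrative note: each regmap entry is the byte offset of the
   corresponding GDB register within the ptrace GPR block (`struct
   user_regs_struct').  The fill/store routines below simply index it,
   roughly:

     collect_register (regcache, i, (char *) buf + i386_regmap[i]);

   and an entry of -1 (see x86_64_regmap below) marks a register that
   has no slot in that block.  */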
171
172 /* So the code below doesn't have to care whether it's i386 or amd64.  */
173 #define ORIG_EAX ORIG_RAX
174 #define REGSIZE 8
175
176 static const int x86_64_regmap[] =
177 {
178 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
179 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
180 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
181 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
182 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
183 DS * 8, ES * 8, FS * 8, GS * 8,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, -1, -1, -1, -1,
187 -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
189 ORIG_RAX * 8,
190 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
191 21 * 8, 22 * 8,
192 #else
193 -1, -1,
194 #endif
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
206 -1 /* pkru */
207 };
208
209 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
210 #define X86_64_USER_REGS (GS + 1)
211
212 #else /* ! __x86_64__ */
213
214 /* Mapping between the general-purpose registers in `struct user'
215 format and GDB's register array layout. */
216 static /*const*/ int i386_regmap[] =
217 {
218 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
219 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
220 EIP * 4, EFL * 4, CS * 4, SS * 4,
221 DS * 4, ES * 4, FS * 4, GS * 4
222 };
223
224 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
225
226 #define REGSIZE 4
227
228 #endif
229
230 #ifdef __x86_64__
231
232 /* Returns true if the current inferior belongs to an x86-64 process,
233 per the tdesc. */
234
235 static int
236 is_64bit_tdesc (void)
237 {
238 struct regcache *regcache = get_thread_regcache (current_thread, 0);
239
240 return register_size (regcache->tdesc, 0) == 8;
241 }
242
243 #endif
244
245 \f
246 /* Called by libthread_db. */
247
248 ps_err_e
249 ps_get_thread_area (struct ps_prochandle *ph,
250 lwpid_t lwpid, int idx, void **base)
251 {
252 #ifdef __x86_64__
253 int use_64bit = is_64bit_tdesc ();
254
255 if (use_64bit)
256 {
257 switch (idx)
258 {
259 case FS:
260 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
261 return PS_OK;
262 break;
263 case GS:
264 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
265 return PS_OK;
266 break;
267 default:
268 return PS_BADADDR;
269 }
270 return PS_ERR;
271 }
272 #endif
273
274 {
275 unsigned int desc[4];
276
277 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
278 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
279 return PS_ERR;
280
281 /* Ensure we properly extend the value to 64-bits for x86_64. */
282 *base = (void *) (uintptr_t) desc[1];
283 return PS_OK;
284 }
285 }
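/* Note on the PTRACE_GET_THREAD_AREA path above: on Linux the request
   fills in a GDT entry descriptor (struct user_desc), whose second
   32-bit word is the segment's base address, which is why only
   desc[1] is consulted.  */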
286
287 /* Get the thread area address. This is used to recognize which
288 thread is which when tracing with the in-process agent library. We
289 don't read anything from the address, and treat it as opaque; it's
290 the address itself that we assume is unique per-thread. */
291
292 static int
293 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
294 {
295 #ifdef __x86_64__
296 int use_64bit = is_64bit_tdesc ();
297
298 if (use_64bit)
299 {
300 void *base;
301 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
302 {
303 *addr = (CORE_ADDR) (uintptr_t) base;
304 return 0;
305 }
306
307 return -1;
308 }
309 #endif
310
311 {
312 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
313 struct thread_info *thr = get_lwp_thread (lwp);
314 struct regcache *regcache = get_thread_regcache (thr, 1);
315 unsigned int desc[4];
316 ULONGEST gs = 0;
317 const int reg_thread_area = 3; /* bits to scale down register value. */
318 int idx;
319
320 collect_register_by_name (regcache, "gs", &gs);
321
322 idx = gs >> reg_thread_area;
323
324 if (ptrace (PTRACE_GET_THREAD_AREA,
325 lwpid_of (thr),
326 (void *) (long) idx, (unsigned long) &desc) < 0)
327 return -1;
328
329 *addr = desc[1];
330 return 0;
331 }
332 }
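/* Worked example for the segment-register arithmetic above: a %gs
   selector holds (GDT index << 3) plus table/RPL bits, so shifting
   right by reg_thread_area (3) recovers the index.  E.g. a selector
   of 0x33 (a typical i386 TLS entry; the exact entry number is up to
   the kernel) gives idx = 0x33 >> 3 = 6, which is then passed to
   PTRACE_GET_THREAD_AREA.  */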
333
334
335 \f
336 bool
337 x86_target::low_cannot_store_register (int regno)
338 {
339 #ifdef __x86_64__
340 if (is_64bit_tdesc ())
341 return false;
342 #endif
343
344 return regno >= I386_NUM_REGS;
345 }
346
347 bool
348 x86_target::low_cannot_fetch_register (int regno)
349 {
350 #ifdef __x86_64__
351 if (is_64bit_tdesc ())
352 return false;
353 #endif
354
355 return regno >= I386_NUM_REGS;
356 }
357
358 static void
359 x86_fill_gregset (struct regcache *regcache, void *buf)
360 {
361 int i;
362
363 #ifdef __x86_64__
364 if (register_size (regcache->tdesc, 0) == 8)
365 {
366 for (i = 0; i < X86_64_NUM_REGS; i++)
367 if (x86_64_regmap[i] != -1)
368 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
369
370 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
371 {
372 unsigned long base;
373 int lwpid = lwpid_of (current_thread);
374
375 collect_register_by_name (regcache, "fs_base", &base);
376 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
377
378 collect_register_by_name (regcache, "gs_base", &base);
379 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
380 }
381 #endif
382
383 return;
384 }
385
386 /* 32-bit inferior registers need to be zero-extended.
387 Callers would read uninitialized memory otherwise. */
388 memset (buf, 0x00, X86_64_USER_REGS * 8);
389 #endif
390
391 for (i = 0; i < I386_NUM_REGS; i++)
392 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
393
394 collect_register_by_name (regcache, "orig_eax",
395 ((char *) buf) + ORIG_EAX * REGSIZE);
396
397 #ifdef __x86_64__
398 /* Sign extend EAX value to avoid potential syscall restart
399 problems.
400
401 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
402 for a detailed explanation. */
403 if (register_size (regcache->tdesc, 0) == 4)
404 {
405 void *ptr = ((gdb_byte *) buf
406 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
407
408 *(int64_t *) ptr = *(int32_t *) ptr;
409 }
410 #endif
411 }
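/* Illustration of the sign-extension step above (values are only an
   example): a 32-bit inferior stopped inside a syscall may have a
   negative value such as 0xfffffe00 (-512, -ERESTARTSYS) in EAX.
   Writing it back zero-extended would hand the 64-bit kernel
   0x00000000fffffe00 in RAX, which it would not recognize as a
   syscall-restart value, so the low 32 bits are stored sign-extended
   instead.  See amd64_linux_collect_native_gregset in
   gdb/amd64-linux-nat.c for the authoritative discussion.  */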
412
413 static void
414 x86_store_gregset (struct regcache *regcache, const void *buf)
415 {
416 int i;
417
418 #ifdef __x86_64__
419 if (register_size (regcache->tdesc, 0) == 8)
420 {
421 for (i = 0; i < X86_64_NUM_REGS; i++)
422 if (x86_64_regmap[i] != -1)
423 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
424
425 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
426 {
427 unsigned long base;
428 int lwpid = lwpid_of (current_thread);
429
430 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
431 supply_register_by_name (regcache, "fs_base", &base);
432
433 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
434 supply_register_by_name (regcache, "gs_base", &base);
435 }
436 #endif
437 return;
438 }
439 #endif
440
441 for (i = 0; i < I386_NUM_REGS; i++)
442 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
443
444 supply_register_by_name (regcache, "orig_eax",
445 ((char *) buf) + ORIG_EAX * REGSIZE);
446 }
447
448 static void
449 x86_fill_fpregset (struct regcache *regcache, void *buf)
450 {
451 #ifdef __x86_64__
452 i387_cache_to_fxsave (regcache, buf);
453 #else
454 i387_cache_to_fsave (regcache, buf);
455 #endif
456 }
457
458 static void
459 x86_store_fpregset (struct regcache *regcache, const void *buf)
460 {
461 #ifdef __x86_64__
462 i387_fxsave_to_cache (regcache, buf);
463 #else
464 i387_fsave_to_cache (regcache, buf);
465 #endif
466 }
467
468 #ifndef __x86_64__
469
470 static void
471 x86_fill_fpxregset (struct regcache *regcache, void *buf)
472 {
473 i387_cache_to_fxsave (regcache, buf);
474 }
475
476 static void
477 x86_store_fpxregset (struct regcache *regcache, const void *buf)
478 {
479 i387_fxsave_to_cache (regcache, buf);
480 }
481
482 #endif
483
484 static void
485 x86_fill_xstateregset (struct regcache *regcache, void *buf)
486 {
487 i387_cache_to_xsave (regcache, buf);
488 }
489
490 static void
491 x86_store_xstateregset (struct regcache *regcache, const void *buf)
492 {
493 i387_xsave_to_cache (regcache, buf);
494 }
495
496 /* ??? The non-biarch i386 case stores all the i387 regs twice.
497 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
498 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
499    doesn't work.  It would be nice to avoid the duplication where it
500    does work.  Maybe the arch_setup routine could check whether it works
501 and update the supported regsets accordingly. */
502
503 static struct regset_info x86_regsets[] =
504 {
505 #ifdef HAVE_PTRACE_GETREGS
506 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
507 GENERAL_REGS,
508 x86_fill_gregset, x86_store_gregset },
509 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
510 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
511 # ifndef __x86_64__
512 # ifdef HAVE_PTRACE_GETFPXREGS
513 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
514 EXTENDED_REGS,
515 x86_fill_fpxregset, x86_store_fpxregset },
516 # endif
517 # endif
518 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
519 FP_REGS,
520 x86_fill_fpregset, x86_store_fpregset },
521 #endif /* HAVE_PTRACE_GETREGS */
522 NULL_REGSET
523 };
524
525 bool
526 x86_target::low_supports_breakpoints ()
527 {
528 return true;
529 }
530
531 CORE_ADDR
532 x86_target::low_get_pc (regcache *regcache)
533 {
534 int use_64bit = register_size (regcache->tdesc, 0) == 8;
535
536 if (use_64bit)
537 {
538 uint64_t pc;
539
540 collect_register_by_name (regcache, "rip", &pc);
541 return (CORE_ADDR) pc;
542 }
543 else
544 {
545 uint32_t pc;
546
547 collect_register_by_name (regcache, "eip", &pc);
548 return (CORE_ADDR) pc;
549 }
550 }
551
552 void
553 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
554 {
555 int use_64bit = register_size (regcache->tdesc, 0) == 8;
556
557 if (use_64bit)
558 {
559 uint64_t newpc = pc;
560
561 supply_register_by_name (regcache, "rip", &newpc);
562 }
563 else
564 {
565 uint32_t newpc = pc;
566
567 supply_register_by_name (regcache, "eip", &newpc);
568 }
569 }
570
571 int
572 x86_target::low_decr_pc_after_break ()
573 {
574 return 1;
575 }
576
577 \f
578 static const gdb_byte x86_breakpoint[] = { 0xCC };
579 #define x86_breakpoint_len 1
580
581 bool
582 x86_target::low_breakpoint_at (CORE_ADDR pc)
583 {
584 unsigned char c;
585
586 read_memory (pc, &c, 1);
587 if (c == 0xCC)
588 return true;
589
590 return false;
591 }
592 \f
593 /* Low-level function vector. */
594 struct x86_dr_low_type x86_dr_low =
595 {
596 x86_linux_dr_set_control,
597 x86_linux_dr_set_addr,
598 x86_linux_dr_get_addr,
599 x86_linux_dr_get_status,
600 x86_linux_dr_get_control,
601 sizeof (void *),
602 };
603 \f
604 /* Breakpoint/Watchpoint support. */
605
606 bool
607 x86_target::supports_z_point_type (char z_type)
608 {
609 switch (z_type)
610 {
611 case Z_PACKET_SW_BP:
612 case Z_PACKET_HW_BP:
613 case Z_PACKET_WRITE_WP:
614 case Z_PACKET_ACCESS_WP:
615 return true;
616 default:
617 return false;
618 }
619 }
620
621 int
622 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
623 int size, raw_breakpoint *bp)
624 {
625 struct process_info *proc = current_process ();
626
627 switch (type)
628 {
629 case raw_bkpt_type_hw:
630 case raw_bkpt_type_write_wp:
631 case raw_bkpt_type_access_wp:
632 {
633 enum target_hw_bp_type hw_type
634 = raw_bkpt_type_to_target_hw_bp_type (type);
635 struct x86_debug_reg_state *state
636 = &proc->priv->arch_private->debug_reg_state;
637
638 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
639 }
640
641 default:
642 /* Unsupported. */
643 return 1;
644 }
645 }
646
647 int
648 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
649 int size, raw_breakpoint *bp)
650 {
651 struct process_info *proc = current_process ();
652
653 switch (type)
654 {
655 case raw_bkpt_type_hw:
656 case raw_bkpt_type_write_wp:
657 case raw_bkpt_type_access_wp:
658 {
659 enum target_hw_bp_type hw_type
660 = raw_bkpt_type_to_target_hw_bp_type (type);
661 struct x86_debug_reg_state *state
662 = &proc->priv->arch_private->debug_reg_state;
663
664 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
665 }
666 default:
667 /* Unsupported. */
668 return 1;
669 }
670 }
671
672 bool
673 x86_target::low_stopped_by_watchpoint ()
674 {
675 struct process_info *proc = current_process ();
676 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
677 }
678
679 CORE_ADDR
680 x86_target::low_stopped_data_address ()
681 {
682 struct process_info *proc = current_process ();
683 CORE_ADDR addr;
684 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
685 &addr))
686 return addr;
687 return 0;
688 }
689 \f
690 /* Called when a new process is created. */
691
692 static struct arch_process_info *
693 x86_linux_new_process (void)
694 {
695 struct arch_process_info *info = XCNEW (struct arch_process_info);
696
697 x86_low_init_dregs (&info->debug_reg_state);
698
699 return info;
700 }
701
702 /* Called when a process is being deleted. */
703
704 static void
705 x86_linux_delete_process (struct arch_process_info *info)
706 {
707 xfree (info);
708 }
709
710 /* Target routine for linux_new_fork. */
711
712 static void
713 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
714 {
715 /* These are allocated by linux_add_process. */
716 gdb_assert (parent->priv != NULL
717 && parent->priv->arch_private != NULL);
718 gdb_assert (child->priv != NULL
719 && child->priv->arch_private != NULL);
720
721 /* Linux kernel before 2.6.33 commit
722 72f674d203cd230426437cdcf7dd6f681dad8b0d
723 will inherit hardware debug registers from parent
724 on fork/vfork/clone. Newer Linux kernels create such tasks with
725 zeroed debug registers.
726
727 GDB core assumes the child inherits the watchpoints/hw
728 breakpoints of the parent, and will remove them all from the
729 forked off process. Copy the debug registers mirrors into the
730 new process so that all breakpoints and watchpoints can be
731 removed together. The debug registers mirror will become zeroed
732 in the end before detaching the forked off process, thus making
733 this compatible with older Linux kernels too. */
734
735 *child->priv->arch_private = *parent->priv->arch_private;
736 }
737
738 /* See nat/x86-dregs.h. */
739
740 struct x86_debug_reg_state *
741 x86_debug_reg_state (pid_t pid)
742 {
743 struct process_info *proc = find_process_pid (pid);
744
745 return &proc->priv->arch_private->debug_reg_state;
746 }
747 \f
748 /* When GDBSERVER is built as a 64-bit application on linux, the
749 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
750 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
751 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
752 conversion in-place ourselves. */
753
754 /* Convert a ptrace/host siginfo object into/from the siginfo in the
755    layout of the inferior's architecture.  Returns true if any
756 conversion was done; false otherwise. If DIRECTION is 1, then copy
757 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
758 INF. */
759
760 static int
761 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
762 {
763 #ifdef __x86_64__
764 unsigned int machine;
765 int tid = lwpid_of (current_thread);
766 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
767
768 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
769 if (!is_64bit_tdesc ())
770 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
771 FIXUP_32);
772 /* No fixup for native x32 GDB. */
773 else if (!is_elf64 && sizeof (void *) == 8)
774 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
775 FIXUP_X32);
776 #endif
777
778 return 0;
779 }
780 \f
781 static int use_xml;
782
783 /* Format of XSAVE extended state is:
784 struct
785 {
786 fxsave_bytes[0..463]
787 sw_usable_bytes[464..511]
788 xstate_hdr_bytes[512..575]
789 avx_bytes[576..831]
790 future_state etc
791 };
792
793 Same memory layout will be used for the coredump NT_X86_XSTATE
794 representing the XSAVE extended state registers.
795
796    The first 8 bytes of sw_usable_bytes[464..471] hold the OS enabled
797 extended state mask, which is the same as the extended control register
798 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
799 together with the mask saved in the xstate_hdr_bytes to determine what
800 states the processor/OS supports and what state, used or initialized,
801 the process/thread is in. */
802 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
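/* Illustration: with the layout described above, XCR0 can be read
   straight out of an XSAVE buffer fetched with PTRACE_GETREGSET and
   NT_X86_XSTATE, roughly:

     uint64_t xcr0
       = ((uint64_t *) xsave_buf)[I386_LINUX_XSAVE_XCR0_OFFSET
                                  / sizeof (uint64_t)];

   (xsave_buf being a stand-in name for the fetched regset); this is
   what x86_linux_read_description does below.  */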
803
804 /* Does the current host support the GETFPXREGS request? The header
805 file may or may not define it, and even if it is defined, the
806 kernel will return EIO if it's running on a pre-SSE processor. */
807 int have_ptrace_getfpxregs =
808 #ifdef HAVE_PTRACE_GETFPXREGS
809 -1
810 #else
811 0
812 #endif
813 ;
814
815 /* Get Linux/x86 target description from running target. */
816
817 static const struct target_desc *
818 x86_linux_read_description (void)
819 {
820 unsigned int machine;
821 int is_elf64;
822 int xcr0_features;
823 int tid;
824 static uint64_t xcr0;
825 struct regset_info *regset;
826
827 tid = lwpid_of (current_thread);
828
829 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
830
831 if (sizeof (void *) == 4)
832 {
833 if (is_elf64 > 0)
834 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
835 #ifndef __x86_64__
836 else if (machine == EM_X86_64)
837 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
838 #endif
839 }
840
841 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
842 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
843 {
844 elf_fpxregset_t fpxregs;
845
846 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
847 {
848 have_ptrace_getfpxregs = 0;
849 have_ptrace_getregset = 0;
850 return i386_linux_read_description (X86_XSTATE_X87);
851 }
852 else
853 have_ptrace_getfpxregs = 1;
854 }
855 #endif
856
857 if (!use_xml)
858 {
859 x86_xcr0 = X86_XSTATE_SSE_MASK;
860
861 /* Don't use XML. */
862 #ifdef __x86_64__
863 if (machine == EM_X86_64)
864 return tdesc_amd64_linux_no_xml;
865 else
866 #endif
867 return tdesc_i386_linux_no_xml;
868 }
869
870 if (have_ptrace_getregset == -1)
871 {
872 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
873 struct iovec iov;
874
875 iov.iov_base = xstateregs;
876 iov.iov_len = sizeof (xstateregs);
877
878 /* Check if PTRACE_GETREGSET works. */
879 if (ptrace (PTRACE_GETREGSET, tid,
880 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
881 have_ptrace_getregset = 0;
882 else
883 {
884 have_ptrace_getregset = 1;
885
886 /* Get XCR0 from XSAVE extended state. */
887 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
888 / sizeof (uint64_t))];
889
890 /* Use PTRACE_GETREGSET if it is available. */
891 for (regset = x86_regsets;
892 regset->fill_function != NULL; regset++)
893 if (regset->get_request == PTRACE_GETREGSET)
894 regset->size = X86_XSTATE_SIZE (xcr0);
895 else if (regset->type != GENERAL_REGS)
896 regset->size = 0;
897 }
898 }
899
900 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
901 xcr0_features = (have_ptrace_getregset
902 && (xcr0 & X86_XSTATE_ALL_MASK));
903
904 if (xcr0_features)
905 x86_xcr0 = xcr0;
906
907 if (machine == EM_X86_64)
908 {
909 #ifdef __x86_64__
910 const target_desc *tdesc = NULL;
911
912 if (xcr0_features)
913 {
914 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
915 !is_elf64);
916 }
917
918 if (tdesc == NULL)
919 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
920 return tdesc;
921 #endif
922 }
923 else
924 {
925 const target_desc *tdesc = NULL;
926
927 if (xcr0_features)
928 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
929
930 if (tdesc == NULL)
931 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
932
933 return tdesc;
934 }
935
936 gdb_assert_not_reached ("failed to return tdesc");
937 }
938
939 /* Update the target description of all processes; a new GDB has
940    connected, and it may or may not support xml target descriptions.  */
941
942 void
943 x86_target::update_xmltarget ()
944 {
945 struct thread_info *saved_thread = current_thread;
946
947 /* Before changing the register cache's internal layout, flush the
948 contents of the current valid caches back to the threads, and
949 release the current regcache objects. */
950 regcache_release ();
951
952 for_each_process ([this] (process_info *proc) {
953 int pid = proc->pid;
954
955 /* Look up any thread of this process. */
956 current_thread = find_any_thread_of_pid (pid);
957
958 low_arch_setup ();
959 });
960
961 current_thread = saved_thread;
962 }
963
964 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
965 PTRACE_GETREGSET. */
966
967 static void
968 x86_linux_process_qsupported (char **features, int count)
969 {
970 int i;
971
972   /* Assume gdb doesn't support XML.  If gdb sends "xmlRegisters="
973      with "i386" in the qSupported query, it supports x86 XML target
974      descriptions.  */
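  /* For example (illustrative packet contents), such a GDB sends a
     qSupported feature like "xmlRegisters=i386"; the value may be a
     comma-separated list, and only an "i386" entry matters to the
     parsing below.  */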
975 use_xml = 0;
976 for (i = 0; i < count; i++)
977 {
978 const char *feature = features[i];
979
980 if (startswith (feature, "xmlRegisters="))
981 {
982 char *copy = xstrdup (feature + 13);
983
984 char *saveptr;
985 for (char *p = strtok_r (copy, ",", &saveptr);
986 p != NULL;
987 p = strtok_r (NULL, ",", &saveptr))
988 {
989 if (strcmp (p, "i386") == 0)
990 {
991 use_xml = 1;
992 break;
993 }
994 }
995
996 free (copy);
997 }
998 }
999 the_x86_target.update_xmltarget ();
1000 }
1001
1002 /* Common for x86/x86-64. */
1003
1004 static struct regsets_info x86_regsets_info =
1005 {
1006 x86_regsets, /* regsets */
1007 0, /* num_regsets */
1008 NULL, /* disabled_regsets */
1009 };
1010
1011 #ifdef __x86_64__
1012 static struct regs_info amd64_linux_regs_info =
1013 {
1014 NULL, /* regset_bitmap */
1015 NULL, /* usrregs_info */
1016 &x86_regsets_info
1017 };
1018 #endif
1019 static struct usrregs_info i386_linux_usrregs_info =
1020 {
1021 I386_NUM_REGS,
1022 i386_regmap,
1023 };
1024
1025 static struct regs_info i386_linux_regs_info =
1026 {
1027 NULL, /* regset_bitmap */
1028 &i386_linux_usrregs_info,
1029 &x86_regsets_info
1030 };
1031
1032 const regs_info *
1033 x86_target::get_regs_info ()
1034 {
1035 #ifdef __x86_64__
1036 if (is_64bit_tdesc ())
1037 return &amd64_linux_regs_info;
1038 else
1039 #endif
1040 return &i386_linux_regs_info;
1041 }
1042
1043 /* Initialize the target description for the architecture of the
1044 inferior. */
1045
1046 void
1047 x86_target::low_arch_setup ()
1048 {
1049 current_process ()->tdesc = x86_linux_read_description ();
1050 }
1051
1052 /* Fill *SYSNO with the number of the syscall that was trapped.  This
1053    should only be called if LWP got a SYSCALL_SIGTRAP.  */
1054
1055 static void
1056 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1057 {
1058 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1059
1060 if (use_64bit)
1061 {
1062 long l_sysno;
1063
1064 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1065 *sysno = (int) l_sysno;
1066 }
1067 else
1068 collect_register_by_name (regcache, "orig_eax", sysno);
1069 }
1070
1071 static int
1072 x86_supports_tracepoints (void)
1073 {
1074 return 1;
1075 }
1076
1077 static void
1078 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1079 {
1080 target_write_memory (*to, buf, len);
1081 *to += len;
1082 }
1083
1084 static int
1085 push_opcode (unsigned char *buf, const char *op)
1086 {
1087 unsigned char *buf_org = buf;
1088
1089 while (1)
1090 {
1091 char *endptr;
1092 unsigned long ul = strtoul (op, &endptr, 16);
1093
1094 if (endptr == op)
1095 break;
1096
1097 *buf++ = ul;
1098 op = endptr;
1099 }
1100
1101 return buf - buf_org;
1102 }
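/* Usage note: push_opcode turns a string of hex byte pairs into raw
   bytes and returns how many were written, e.g.

     i += push_opcode (&buf[i], "48 89 e6");   (mov %rsp,%rsi -- 3 bytes)

   which is how the jump pad builders below assemble their instruction
   templates.  */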
1103
1104 #ifdef __x86_64__
1105
1106 /* Build a jump pad that saves registers and calls a collection
1107    function.  Writes the jump instruction for the jump pad into
1108    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1109    tracepoint address.  */
1110
1111 static int
1112 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1113 CORE_ADDR collector,
1114 CORE_ADDR lockaddr,
1115 ULONGEST orig_size,
1116 CORE_ADDR *jump_entry,
1117 CORE_ADDR *trampoline,
1118 ULONGEST *trampoline_size,
1119 unsigned char *jjump_pad_insn,
1120 ULONGEST *jjump_pad_insn_size,
1121 CORE_ADDR *adjusted_insn_addr,
1122 CORE_ADDR *adjusted_insn_addr_end,
1123 char *err)
1124 {
1125 unsigned char buf[40];
1126 int i, offset;
1127 int64_t loffset;
1128
1129 CORE_ADDR buildaddr = *jump_entry;
1130
1131 /* Build the jump pad. */
1132
1133 /* First, do tracepoint data collection. Save registers. */
1134 i = 0;
1135   /* Need to ensure the stack pointer is saved first.  */
1136 buf[i++] = 0x54; /* push %rsp */
1137 buf[i++] = 0x55; /* push %rbp */
1138 buf[i++] = 0x57; /* push %rdi */
1139 buf[i++] = 0x56; /* push %rsi */
1140 buf[i++] = 0x52; /* push %rdx */
1141 buf[i++] = 0x51; /* push %rcx */
1142 buf[i++] = 0x53; /* push %rbx */
1143 buf[i++] = 0x50; /* push %rax */
1144 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1145 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1146 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1147 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1148 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1149 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1150 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1151 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1152 buf[i++] = 0x9c; /* pushfq */
1153 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1154 buf[i++] = 0xbf;
1155 memcpy (buf + i, &tpaddr, 8);
1156 i += 8;
1157 buf[i++] = 0x57; /* push %rdi */
1158 append_insns (&buildaddr, i, buf);
1159
1160 /* Stack space for the collecting_t object. */
1161 i = 0;
1162 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1163 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1164 memcpy (buf + i, &tpoint, 8);
1165 i += 8;
1166 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1167 i += push_opcode (&buf[i],
1168 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1169 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1170 append_insns (&buildaddr, i, buf);
1171
1172 /* spin-lock. */
1173 i = 0;
1174   i += push_opcode (&buf[i], "48 be");	/* movabs <lockaddr>,%rsi */
1175 memcpy (&buf[i], (void *) &lockaddr, 8);
1176 i += 8;
1177 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1178 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1179 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1180 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1181 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1182 append_insns (&buildaddr, i, buf);
1183
1184 /* Set up the gdb_collect call. */
1185 /* At this point, (stack pointer + 0x18) is the base of our saved
1186 register block. */
1187
1188 i = 0;
1189 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1190 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1191
1192 /* tpoint address may be 64-bit wide. */
1193   i += push_opcode (&buf[i], "48 bf");	/* movabs <addr>,%rdi */
1194 memcpy (buf + i, &tpoint, 8);
1195 i += 8;
1196 append_insns (&buildaddr, i, buf);
1197
1198   /* The collector function, being in the shared library, may be
1199      more than 31 bits away from the jump pad.  */
1200 i = 0;
1201 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1202 memcpy (buf + i, &collector, 8);
1203 i += 8;
1204 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1205 append_insns (&buildaddr, i, buf);
1206
1207 /* Clear the spin-lock. */
1208 i = 0;
1209 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1210 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1211 memcpy (buf + i, &lockaddr, 8);
1212 i += 8;
1213 append_insns (&buildaddr, i, buf);
1214
1215 /* Remove stack that had been used for the collect_t object. */
1216 i = 0;
1217 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1218 append_insns (&buildaddr, i, buf);
1219
1220 /* Restore register state. */
1221 i = 0;
1222 buf[i++] = 0x48; /* add $0x8,%rsp */
1223 buf[i++] = 0x83;
1224 buf[i++] = 0xc4;
1225 buf[i++] = 0x08;
1226 buf[i++] = 0x9d; /* popfq */
1227 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1228 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1229 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1230 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1231 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1232 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1233 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1234 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1235 buf[i++] = 0x58; /* pop %rax */
1236 buf[i++] = 0x5b; /* pop %rbx */
1237 buf[i++] = 0x59; /* pop %rcx */
1238 buf[i++] = 0x5a; /* pop %rdx */
1239 buf[i++] = 0x5e; /* pop %rsi */
1240 buf[i++] = 0x5f; /* pop %rdi */
1241 buf[i++] = 0x5d; /* pop %rbp */
1242 buf[i++] = 0x5c; /* pop %rsp */
1243 append_insns (&buildaddr, i, buf);
1244
1245 /* Now, adjust the original instruction to execute in the jump
1246 pad. */
1247 *adjusted_insn_addr = buildaddr;
1248 relocate_instruction (&buildaddr, tpaddr);
1249 *adjusted_insn_addr_end = buildaddr;
1250
1251 /* Finally, write a jump back to the program. */
1252
1253 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1254 if (loffset > INT_MAX || loffset < INT_MIN)
1255 {
1256 sprintf (err,
1257 "E.Jump back from jump pad too far from tracepoint "
1258 "(offset 0x%" PRIx64 " > int32).", loffset);
1259 return 1;
1260 }
1261
1262 offset = (int) loffset;
1263 memcpy (buf, jump_insn, sizeof (jump_insn));
1264 memcpy (buf + 1, &offset, 4);
1265 append_insns (&buildaddr, sizeof (jump_insn), buf);
1266
1267 /* The jump pad is now built. Wire in a jump to our jump pad. This
1268 is always done last (by our caller actually), so that we can
1269 install fast tracepoints with threads running. This relies on
1270 the agent's atomic write support. */
1271 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1272 if (loffset > INT_MAX || loffset < INT_MIN)
1273 {
1274 sprintf (err,
1275 "E.Jump pad too far from tracepoint "
1276 "(offset 0x%" PRIx64 " > int32).", loffset);
1277 return 1;
1278 }
1279
1280 offset = (int) loffset;
1281
1282 memcpy (buf, jump_insn, sizeof (jump_insn));
1283 memcpy (buf + 1, &offset, 4);
1284 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1285 *jjump_pad_insn_size = sizeof (jump_insn);
1286
1287 /* Return the end address of our pad. */
1288 *jump_entry = buildaddr;
1289
1290 return 0;
1291 }
1292
1293 #endif /* __x86_64__ */
1294
1295 /* Build a jump pad that saves registers and calls a collection
1296    function.  Writes the jump instruction for the jump pad into
1297    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1298    tracepoint address.  */
1299
1300 static int
1301 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1302 CORE_ADDR collector,
1303 CORE_ADDR lockaddr,
1304 ULONGEST orig_size,
1305 CORE_ADDR *jump_entry,
1306 CORE_ADDR *trampoline,
1307 ULONGEST *trampoline_size,
1308 unsigned char *jjump_pad_insn,
1309 ULONGEST *jjump_pad_insn_size,
1310 CORE_ADDR *adjusted_insn_addr,
1311 CORE_ADDR *adjusted_insn_addr_end,
1312 char *err)
1313 {
1314 unsigned char buf[0x100];
1315 int i, offset;
1316 CORE_ADDR buildaddr = *jump_entry;
1317
1318 /* Build the jump pad. */
1319
1320 /* First, do tracepoint data collection. Save registers. */
1321 i = 0;
1322 buf[i++] = 0x60; /* pushad */
1323 buf[i++] = 0x68; /* push tpaddr aka $pc */
1324 *((int *)(buf + i)) = (int) tpaddr;
1325 i += 4;
1326 buf[i++] = 0x9c; /* pushf */
1327 buf[i++] = 0x1e; /* push %ds */
1328 buf[i++] = 0x06; /* push %es */
1329 buf[i++] = 0x0f; /* push %fs */
1330 buf[i++] = 0xa0;
1331 buf[i++] = 0x0f; /* push %gs */
1332 buf[i++] = 0xa8;
1333 buf[i++] = 0x16; /* push %ss */
1334 buf[i++] = 0x0e; /* push %cs */
1335 append_insns (&buildaddr, i, buf);
1336
1337 /* Stack space for the collecting_t object. */
1338 i = 0;
1339 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1340
1341 /* Build the object. */
1342 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1343 memcpy (buf + i, &tpoint, 4);
1344 i += 4;
1345 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1346
1347 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1348 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1349 append_insns (&buildaddr, i, buf);
1350
1351   /* spin-lock.  Note this uses cmpxchg, which is not available on the
1352      original i386.  If we cared about that, this could use xchg instead.  */
1353
1354 i = 0;
1355 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1356 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1357 %esp,<lockaddr> */
1358 memcpy (&buf[i], (void *) &lockaddr, 4);
1359 i += 4;
1360 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1361 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1362 append_insns (&buildaddr, i, buf);
1363
1364
1365 /* Set up arguments to the gdb_collect call. */
1366 i = 0;
1367 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1368 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1369 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1370 append_insns (&buildaddr, i, buf);
1371
1372 i = 0;
1373 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1374 append_insns (&buildaddr, i, buf);
1375
1376 i = 0;
1377 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1378 memcpy (&buf[i], (void *) &tpoint, 4);
1379 i += 4;
1380 append_insns (&buildaddr, i, buf);
1381
1382 buf[0] = 0xe8; /* call <reladdr> */
1383 offset = collector - (buildaddr + sizeof (jump_insn));
1384 memcpy (buf + 1, &offset, 4);
1385 append_insns (&buildaddr, 5, buf);
1386 /* Clean up after the call. */
1387 buf[0] = 0x83; /* add $0x8,%esp */
1388 buf[1] = 0xc4;
1389 buf[2] = 0x08;
1390 append_insns (&buildaddr, 3, buf);
1391
1392
1393 /* Clear the spin-lock. This would need the LOCK prefix on older
1394 broken archs. */
1395 i = 0;
1396 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1397 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1398 memcpy (buf + i, &lockaddr, 4);
1399 i += 4;
1400 append_insns (&buildaddr, i, buf);
1401
1402
1403 /* Remove stack that had been used for the collect_t object. */
1404 i = 0;
1405 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1406 append_insns (&buildaddr, i, buf);
1407
1408 i = 0;
1409 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1410 buf[i++] = 0xc4;
1411 buf[i++] = 0x04;
1412 buf[i++] = 0x17; /* pop %ss */
1413 buf[i++] = 0x0f; /* pop %gs */
1414 buf[i++] = 0xa9;
1415 buf[i++] = 0x0f; /* pop %fs */
1416 buf[i++] = 0xa1;
1417 buf[i++] = 0x07; /* pop %es */
1418 buf[i++] = 0x1f; /* pop %ds */
1419 buf[i++] = 0x9d; /* popf */
1420 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1421 buf[i++] = 0xc4;
1422 buf[i++] = 0x04;
1423 buf[i++] = 0x61; /* popad */
1424 append_insns (&buildaddr, i, buf);
1425
1426 /* Now, adjust the original instruction to execute in the jump
1427 pad. */
1428 *adjusted_insn_addr = buildaddr;
1429 relocate_instruction (&buildaddr, tpaddr);
1430 *adjusted_insn_addr_end = buildaddr;
1431
1432 /* Write the jump back to the program. */
1433 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1434 memcpy (buf, jump_insn, sizeof (jump_insn));
1435 memcpy (buf + 1, &offset, 4);
1436 append_insns (&buildaddr, sizeof (jump_insn), buf);
1437
1438 /* The jump pad is now built. Wire in a jump to our jump pad. This
1439 is always done last (by our caller actually), so that we can
1440 install fast tracepoints with threads running. This relies on
1441 the agent's atomic write support. */
1442 if (orig_size == 4)
1443 {
1444 /* Create a trampoline. */
1445 *trampoline_size = sizeof (jump_insn);
1446 if (!claim_trampoline_space (*trampoline_size, trampoline))
1447 {
1448 /* No trampoline space available. */
1449 strcpy (err,
1450 "E.Cannot allocate trampoline space needed for fast "
1451 "tracepoints on 4-byte instructions.");
1452 return 1;
1453 }
1454
1455 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1456 memcpy (buf, jump_insn, sizeof (jump_insn));
1457 memcpy (buf + 1, &offset, 4);
1458 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1459
1460 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1461 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1462 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1463 memcpy (buf + 2, &offset, 2);
1464 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1465 *jjump_pad_insn_size = sizeof (small_jump_insn);
1466 }
1467 else
1468 {
1469 /* Else use a 32-bit relative jump instruction. */
1470 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1471 memcpy (buf, jump_insn, sizeof (jump_insn));
1472 memcpy (buf + 1, &offset, 4);
1473 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1474 *jjump_pad_insn_size = sizeof (jump_insn);
1475 }
1476
1477 /* Return the end address of our pad. */
1478 *jump_entry = buildaddr;
1479
1480 return 0;
1481 }
1482
1483 static int
1484 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1485 CORE_ADDR collector,
1486 CORE_ADDR lockaddr,
1487 ULONGEST orig_size,
1488 CORE_ADDR *jump_entry,
1489 CORE_ADDR *trampoline,
1490 ULONGEST *trampoline_size,
1491 unsigned char *jjump_pad_insn,
1492 ULONGEST *jjump_pad_insn_size,
1493 CORE_ADDR *adjusted_insn_addr,
1494 CORE_ADDR *adjusted_insn_addr_end,
1495 char *err)
1496 {
1497 #ifdef __x86_64__
1498 if (is_64bit_tdesc ())
1499 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1500 collector, lockaddr,
1501 orig_size, jump_entry,
1502 trampoline, trampoline_size,
1503 jjump_pad_insn,
1504 jjump_pad_insn_size,
1505 adjusted_insn_addr,
1506 adjusted_insn_addr_end,
1507 err);
1508 #endif
1509
1510 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1511 collector, lockaddr,
1512 orig_size, jump_entry,
1513 trampoline, trampoline_size,
1514 jjump_pad_insn,
1515 jjump_pad_insn_size,
1516 adjusted_insn_addr,
1517 adjusted_insn_addr_end,
1518 err);
1519 }
1520
1521 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1522 architectures. */
1523
1524 static int
1525 x86_get_min_fast_tracepoint_insn_len (void)
1526 {
1527 static int warned_about_fast_tracepoints = 0;
1528
1529 #ifdef __x86_64__
1530 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1531 used for fast tracepoints. */
1532 if (is_64bit_tdesc ())
1533 return 5;
1534 #endif
1535
1536 if (agent_loaded_p ())
1537 {
1538 char errbuf[IPA_BUFSIZ];
1539
1540 errbuf[0] = '\0';
1541
1542 /* On x86, if trampolines are available, then 4-byte jump instructions
1543 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1544 with a 4-byte offset are used instead. */
1545 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1546 return 4;
1547 else
1548 {
1549 	  /* GDB has no channel to explain to the user why a shorter fast
1550 tracepoint is not possible, but at least make GDBserver
1551 mention that something has gone awry. */
1552 if (!warned_about_fast_tracepoints)
1553 {
1554 warning ("4-byte fast tracepoints not available; %s", errbuf);
1555 warned_about_fast_tracepoints = 1;
1556 }
1557 return 5;
1558 }
1559 }
1560 else
1561 {
1562 /* Indicate that the minimum length is currently unknown since the IPA
1563 has not loaded yet. */
1564 return 0;
1565 }
1566 }
1567
1568 static void
1569 add_insns (unsigned char *start, int len)
1570 {
1571 CORE_ADDR buildaddr = current_insn_ptr;
1572
1573 if (debug_threads)
1574 debug_printf ("Adding %d bytes of insn at %s\n",
1575 len, paddress (buildaddr));
1576
1577 append_insns (&buildaddr, len, start);
1578 current_insn_ptr = buildaddr;
1579 }
1580
1581 /* Our general strategy for emitting code is to avoid specifying raw
1582 bytes whenever possible, and instead copy a block of inline asm
1583 that is embedded in the function. This is a little messy, because
1584 we need to keep the compiler from discarding what looks like dead
1585 code, plus suppress various warnings. */
1586
1587 #define EMIT_ASM(NAME, INSNS) \
1588 do \
1589 { \
1590 extern unsigned char start_ ## NAME, end_ ## NAME; \
1591 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1592 __asm__ ("jmp end_" #NAME "\n" \
1593 "\t" "start_" #NAME ":" \
1594 "\t" INSNS "\n" \
1595 "\t" "end_" #NAME ":"); \
1596 } while (0)
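/* Note on the mechanism: the inline asm plants start_NAME/end_NAME
   labels around the instruction template, and add_insns copies the
   bytes between those labels to current_insn_ptr in the inferior.
   For example, EMIT_ASM (my_nop, "nop") (my_nop being a made-up name)
   would emit the single byte 0x90.  The leading jmp skips over the
   template so it is never executed when the emitter function itself
   runs.  */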
1597
1598 #ifdef __x86_64__
1599
1600 #define EMIT_ASM32(NAME,INSNS) \
1601 do \
1602 { \
1603 extern unsigned char start_ ## NAME, end_ ## NAME; \
1604 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1605 __asm__ (".code32\n" \
1606 "\t" "jmp end_" #NAME "\n" \
1607 "\t" "start_" #NAME ":\n" \
1608 "\t" INSNS "\n" \
1609 "\t" "end_" #NAME ":\n" \
1610 ".code64\n"); \
1611 } while (0)
1612
1613 #else
1614
1615 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1616
1617 #endif
1618
1619 #ifdef __x86_64__
1620
1621 static void
1622 amd64_emit_prologue (void)
1623 {
1624 EMIT_ASM (amd64_prologue,
1625 "pushq %rbp\n\t"
1626 "movq %rsp,%rbp\n\t"
1627 "sub $0x20,%rsp\n\t"
1628 "movq %rdi,-8(%rbp)\n\t"
1629 "movq %rsi,-16(%rbp)");
1630 }
1631
1632
1633 static void
1634 amd64_emit_epilogue (void)
1635 {
1636 EMIT_ASM (amd64_epilogue,
1637 "movq -16(%rbp),%rdi\n\t"
1638 "movq %rax,(%rdi)\n\t"
1639 "xor %rax,%rax\n\t"
1640 "leave\n\t"
1641 "ret");
1642 }
1643
1644 static void
1645 amd64_emit_add (void)
1646 {
1647 EMIT_ASM (amd64_add,
1648 "add (%rsp),%rax\n\t"
1649 "lea 0x8(%rsp),%rsp");
1650 }
1651
1652 static void
1653 amd64_emit_sub (void)
1654 {
1655 EMIT_ASM (amd64_sub,
1656 "sub %rax,(%rsp)\n\t"
1657 "pop %rax");
1658 }
1659
1660 static void
1661 amd64_emit_mul (void)
1662 {
1663 emit_error = 1;
1664 }
1665
1666 static void
1667 amd64_emit_lsh (void)
1668 {
1669 emit_error = 1;
1670 }
1671
1672 static void
1673 amd64_emit_rsh_signed (void)
1674 {
1675 emit_error = 1;
1676 }
1677
1678 static void
1679 amd64_emit_rsh_unsigned (void)
1680 {
1681 emit_error = 1;
1682 }
1683
1684 static void
1685 amd64_emit_ext (int arg)
1686 {
1687 switch (arg)
1688 {
1689 case 8:
1690 EMIT_ASM (amd64_ext_8,
1691 "cbtw\n\t"
1692 "cwtl\n\t"
1693 "cltq");
1694 break;
1695 case 16:
1696 EMIT_ASM (amd64_ext_16,
1697 "cwtl\n\t"
1698 "cltq");
1699 break;
1700 case 32:
1701 EMIT_ASM (amd64_ext_32,
1702 "cltq");
1703 break;
1704 default:
1705 emit_error = 1;
1706 }
1707 }
1708
1709 static void
1710 amd64_emit_log_not (void)
1711 {
1712 EMIT_ASM (amd64_log_not,
1713 "test %rax,%rax\n\t"
1714 "sete %cl\n\t"
1715 "movzbq %cl,%rax");
1716 }
1717
1718 static void
1719 amd64_emit_bit_and (void)
1720 {
1721 EMIT_ASM (amd64_and,
1722 "and (%rsp),%rax\n\t"
1723 "lea 0x8(%rsp),%rsp");
1724 }
1725
1726 static void
1727 amd64_emit_bit_or (void)
1728 {
1729 EMIT_ASM (amd64_or,
1730 "or (%rsp),%rax\n\t"
1731 "lea 0x8(%rsp),%rsp");
1732 }
1733
1734 static void
1735 amd64_emit_bit_xor (void)
1736 {
1737 EMIT_ASM (amd64_xor,
1738 "xor (%rsp),%rax\n\t"
1739 "lea 0x8(%rsp),%rsp");
1740 }
1741
1742 static void
1743 amd64_emit_bit_not (void)
1744 {
1745 EMIT_ASM (amd64_bit_not,
1746 "xorq $0xffffffffffffffff,%rax");
1747 }
1748
1749 static void
1750 amd64_emit_equal (void)
1751 {
1752 EMIT_ASM (amd64_equal,
1753 "cmp %rax,(%rsp)\n\t"
1754 "je .Lamd64_equal_true\n\t"
1755 "xor %rax,%rax\n\t"
1756 "jmp .Lamd64_equal_end\n\t"
1757 ".Lamd64_equal_true:\n\t"
1758 "mov $0x1,%rax\n\t"
1759 ".Lamd64_equal_end:\n\t"
1760 "lea 0x8(%rsp),%rsp");
1761 }
1762
1763 static void
1764 amd64_emit_less_signed (void)
1765 {
1766 EMIT_ASM (amd64_less_signed,
1767 "cmp %rax,(%rsp)\n\t"
1768 "jl .Lamd64_less_signed_true\n\t"
1769 "xor %rax,%rax\n\t"
1770 "jmp .Lamd64_less_signed_end\n\t"
1771 ".Lamd64_less_signed_true:\n\t"
1772 "mov $1,%rax\n\t"
1773 ".Lamd64_less_signed_end:\n\t"
1774 "lea 0x8(%rsp),%rsp");
1775 }
1776
1777 static void
1778 amd64_emit_less_unsigned (void)
1779 {
1780 EMIT_ASM (amd64_less_unsigned,
1781 "cmp %rax,(%rsp)\n\t"
1782 "jb .Lamd64_less_unsigned_true\n\t"
1783 "xor %rax,%rax\n\t"
1784 "jmp .Lamd64_less_unsigned_end\n\t"
1785 ".Lamd64_less_unsigned_true:\n\t"
1786 "mov $1,%rax\n\t"
1787 ".Lamd64_less_unsigned_end:\n\t"
1788 "lea 0x8(%rsp),%rsp");
1789 }
1790
1791 static void
1792 amd64_emit_ref (int size)
1793 {
1794 switch (size)
1795 {
1796 case 1:
1797 EMIT_ASM (amd64_ref1,
1798 "movb (%rax),%al");
1799 break;
1800 case 2:
1801 EMIT_ASM (amd64_ref2,
1802 "movw (%rax),%ax");
1803 break;
1804 case 4:
1805 EMIT_ASM (amd64_ref4,
1806 "movl (%rax),%eax");
1807 break;
1808 case 8:
1809 EMIT_ASM (amd64_ref8,
1810 "movq (%rax),%rax");
1811 break;
1812 }
1813 }
1814
1815 static void
1816 amd64_emit_if_goto (int *offset_p, int *size_p)
1817 {
1818 EMIT_ASM (amd64_if_goto,
1819 "mov %rax,%rcx\n\t"
1820 "pop %rax\n\t"
1821 "cmp $0,%rcx\n\t"
1822 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1823 if (offset_p)
1824 *offset_p = 10;
1825 if (size_p)
1826 *size_p = 4;
1827 }
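/* Note on the reported offsets (assuming the usual encodings): mov
   %rax,%rcx is 3 bytes, pop %rax is 1, cmp $0,%rcx is 4, and the
   .byte sequence is a 6-byte "jne rel32", so the 4-byte displacement
   to be patched starts 10 bytes into the emitted block -- hence
   *offset_p = 10 and *size_p = 4, later filled in through the
   write_goto_address hook.  */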
1828
1829 static void
1830 amd64_emit_goto (int *offset_p, int *size_p)
1831 {
1832 EMIT_ASM (amd64_goto,
1833 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1834 if (offset_p)
1835 *offset_p = 1;
1836 if (size_p)
1837 *size_p = 4;
1838 }
1839
1840 static void
1841 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1842 {
1843 int diff = (to - (from + size));
1844 unsigned char buf[sizeof (int)];
1845
1846 if (size != 4)
1847 {
1848 emit_error = 1;
1849 return;
1850 }
1851
1852 memcpy (buf, &diff, sizeof (int));
1853 target_write_memory (from, buf, sizeof (int));
1854 }
1855
1856 static void
1857 amd64_emit_const (LONGEST num)
1858 {
1859 unsigned char buf[16];
1860 int i;
1861 CORE_ADDR buildaddr = current_insn_ptr;
1862
1863 i = 0;
1864 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1865 memcpy (&buf[i], &num, sizeof (num));
1866 i += 8;
1867 append_insns (&buildaddr, i, buf);
1868 current_insn_ptr = buildaddr;
1869 }
1870
1871 static void
1872 amd64_emit_call (CORE_ADDR fn)
1873 {
1874 unsigned char buf[16];
1875 int i;
1876 CORE_ADDR buildaddr;
1877 LONGEST offset64;
1878
1879   /* The destination function, being in the shared library, may be
1880      more than 31 bits away from the compiled code pad.  */
1881
1882 buildaddr = current_insn_ptr;
1883
1884 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1885
1886 i = 0;
1887
1888 if (offset64 > INT_MAX || offset64 < INT_MIN)
1889 {
1890       /* Offset is too large for a direct call.  Use an indirect callq
1891 	 through a register instead.  Use r10; since it is call-clobbered,
1892 	 we don't have to push/pop it.  */
1893 buf[i++] = 0x48; /* mov $fn,%r10 */
1894 buf[i++] = 0xba;
1895 memcpy (buf + i, &fn, 8);
1896 i += 8;
1897 buf[i++] = 0xff; /* callq *%r10 */
1898 buf[i++] = 0xd2;
1899 }
1900 else
1901 {
1902 int offset32 = offset64; /* we know we can't overflow here. */
1903
1904 buf[i++] = 0xe8; /* call <reladdr> */
1905 memcpy (buf + i, &offset32, 4);
1906 i += 4;
1907 }
1908
1909 append_insns (&buildaddr, i, buf);
1910 current_insn_ptr = buildaddr;
1911 }
1912
1913 static void
1914 amd64_emit_reg (int reg)
1915 {
1916 unsigned char buf[16];
1917 int i;
1918 CORE_ADDR buildaddr;
1919
1920 /* Assume raw_regs is still in %rdi. */
1921 buildaddr = current_insn_ptr;
1922 i = 0;
1923 buf[i++] = 0xbe; /* mov $<n>,%esi */
1924 memcpy (&buf[i], &reg, sizeof (reg));
1925 i += 4;
1926 append_insns (&buildaddr, i, buf);
1927 current_insn_ptr = buildaddr;
1928 amd64_emit_call (get_raw_reg_func_addr ());
1929 }
1930
1931 static void
1932 amd64_emit_pop (void)
1933 {
1934 EMIT_ASM (amd64_pop,
1935 "pop %rax");
1936 }
1937
1938 static void
1939 amd64_emit_stack_flush (void)
1940 {
1941 EMIT_ASM (amd64_stack_flush,
1942 "push %rax");
1943 }
1944
1945 static void
1946 amd64_emit_zero_ext (int arg)
1947 {
1948 switch (arg)
1949 {
1950 case 8:
1951 EMIT_ASM (amd64_zero_ext_8,
1952 "and $0xff,%rax");
1953 break;
1954 case 16:
1955 EMIT_ASM (amd64_zero_ext_16,
1956 "and $0xffff,%rax");
1957 break;
1958 case 32:
1959 EMIT_ASM (amd64_zero_ext_32,
1960 "mov $0xffffffff,%rcx\n\t"
1961 "and %rcx,%rax");
1962 break;
1963 default:
1964 emit_error = 1;
1965 }
1966 }
1967
1968 static void
1969 amd64_emit_swap (void)
1970 {
1971 EMIT_ASM (amd64_swap,
1972 "mov %rax,%rcx\n\t"
1973 "pop %rax\n\t"
1974 "push %rcx");
1975 }
1976
1977 static void
1978 amd64_emit_stack_adjust (int n)
1979 {
1980 unsigned char buf[16];
1981 int i;
1982 CORE_ADDR buildaddr = current_insn_ptr;
1983
1984 i = 0;
1985 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1986 buf[i++] = 0x8d;
1987 buf[i++] = 0x64;
1988 buf[i++] = 0x24;
1989 /* This only handles adjustments up to 16, but we don't expect any more. */
1990 buf[i++] = n * 8;
1991 append_insns (&buildaddr, i, buf);
1992 current_insn_ptr = buildaddr;
1993 }
1994
1995 /* FN's prototype is `LONGEST(*fn)(int)'. */
1996
1997 static void
1998 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1999 {
2000 unsigned char buf[16];
2001 int i;
2002 CORE_ADDR buildaddr;
2003
2004 buildaddr = current_insn_ptr;
2005 i = 0;
2006 buf[i++] = 0xbf; /* movl $<n>,%edi */
2007 memcpy (&buf[i], &arg1, sizeof (arg1));
2008 i += 4;
2009 append_insns (&buildaddr, i, buf);
2010 current_insn_ptr = buildaddr;
2011 amd64_emit_call (fn);
2012 }
2013
2014 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2015
2016 static void
2017 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2018 {
2019 unsigned char buf[16];
2020 int i;
2021 CORE_ADDR buildaddr;
2022
2023 buildaddr = current_insn_ptr;
2024 i = 0;
2025 buf[i++] = 0xbf; /* movl $<n>,%edi */
2026 memcpy (&buf[i], &arg1, sizeof (arg1));
2027 i += 4;
2028 append_insns (&buildaddr, i, buf);
2029 current_insn_ptr = buildaddr;
2030 EMIT_ASM (amd64_void_call_2_a,
2031 /* Save away a copy of the stack top. */
2032 "push %rax\n\t"
2033 /* Also pass top as the second argument. */
2034 "mov %rax,%rsi");
2035 amd64_emit_call (fn);
2036 EMIT_ASM (amd64_void_call_2_b,
2037 /* Restore the stack top, %rax may have been trashed. */
2038 "pop %rax");
2039 }
2040
2041 static void
2042 amd64_emit_eq_goto (int *offset_p, int *size_p)
2043 {
2044 EMIT_ASM (amd64_eq,
2045 "cmp %rax,(%rsp)\n\t"
2046 "jne .Lamd64_eq_fallthru\n\t"
2047 "lea 0x8(%rsp),%rsp\n\t"
2048 "pop %rax\n\t"
2049 /* jmp, but don't trust the assembler to choose the right jump */
2050 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2051 ".Lamd64_eq_fallthru:\n\t"
2052 "lea 0x8(%rsp),%rsp\n\t"
2053 "pop %rax");
2054
2055 if (offset_p)
2056 *offset_p = 13;
2057 if (size_p)
2058 *size_p = 4;
2059 }
2060
2061 static void
2062 amd64_emit_ne_goto (int *offset_p, int *size_p)
2063 {
2064 EMIT_ASM (amd64_ne,
2065 "cmp %rax,(%rsp)\n\t"
2066 "je .Lamd64_ne_fallthru\n\t"
2067 "lea 0x8(%rsp),%rsp\n\t"
2068 "pop %rax\n\t"
2069 /* jmp, but don't trust the assembler to choose the right jump */
2070 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2071 ".Lamd64_ne_fallthru:\n\t"
2072 "lea 0x8(%rsp),%rsp\n\t"
2073 "pop %rax");
2074
2075 if (offset_p)
2076 *offset_p = 13;
2077 if (size_p)
2078 *size_p = 4;
2079 }
2080
2081 static void
2082 amd64_emit_lt_goto (int *offset_p, int *size_p)
2083 {
2084 EMIT_ASM (amd64_lt,
2085 "cmp %rax,(%rsp)\n\t"
2086 "jnl .Lamd64_lt_fallthru\n\t"
2087 "lea 0x8(%rsp),%rsp\n\t"
2088 "pop %rax\n\t"
2089 /* jmp, but don't trust the assembler to choose the right jump */
2090 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2091 ".Lamd64_lt_fallthru:\n\t"
2092 "lea 0x8(%rsp),%rsp\n\t"
2093 "pop %rax");
2094
2095 if (offset_p)
2096 *offset_p = 13;
2097 if (size_p)
2098 *size_p = 4;
2099 }
2100
2101 static void
2102 amd64_emit_le_goto (int *offset_p, int *size_p)
2103 {
2104 EMIT_ASM (amd64_le,
2105 "cmp %rax,(%rsp)\n\t"
2106 "jnle .Lamd64_le_fallthru\n\t"
2107 "lea 0x8(%rsp),%rsp\n\t"
2108 "pop %rax\n\t"
2109 /* jmp, but don't trust the assembler to choose the right jump */
2110 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2111 ".Lamd64_le_fallthru:\n\t"
2112 "lea 0x8(%rsp),%rsp\n\t"
2113 "pop %rax");
2114
2115 if (offset_p)
2116 *offset_p = 13;
2117 if (size_p)
2118 *size_p = 4;
2119 }
2120
2121 static void
2122 amd64_emit_gt_goto (int *offset_p, int *size_p)
2123 {
2124 EMIT_ASM (amd64_gt,
2125 "cmp %rax,(%rsp)\n\t"
2126 "jng .Lamd64_gt_fallthru\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax\n\t"
2129 /* jmp, but don't trust the assembler to choose the right jump */
2130 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2131 ".Lamd64_gt_fallthru:\n\t"
2132 "lea 0x8(%rsp),%rsp\n\t"
2133 "pop %rax");
2134
2135 if (offset_p)
2136 *offset_p = 13;
2137 if (size_p)
2138 *size_p = 4;
2139 }
2140
2141 static void
2142 amd64_emit_ge_goto (int *offset_p, int *size_p)
2143 {
2144 EMIT_ASM (amd64_ge,
2145 "cmp %rax,(%rsp)\n\t"
2146 "jnge .Lamd64_ge_fallthru\n\t"
2147 ".Lamd64_ge_jump:\n\t"
2148 "lea 0x8(%rsp),%rsp\n\t"
2149 "pop %rax\n\t"
2150 /* jmp, but don't trust the assembler to choose the right jump */
2151 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2152 ".Lamd64_ge_fallthru:\n\t"
2153 "lea 0x8(%rsp),%rsp\n\t"
2154 "pop %rax");
2155
2156 if (offset_p)
2157 *offset_p = 13;
2158 if (size_p)
2159 *size_p = 4;
2160 }
2161
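/* The emit_ops vector returned by x86_emit_ops for 64-bit
   inferiors.  */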
2162 struct emit_ops amd64_emit_ops =
2163 {
2164 amd64_emit_prologue,
2165 amd64_emit_epilogue,
2166 amd64_emit_add,
2167 amd64_emit_sub,
2168 amd64_emit_mul,
2169 amd64_emit_lsh,
2170 amd64_emit_rsh_signed,
2171 amd64_emit_rsh_unsigned,
2172 amd64_emit_ext,
2173 amd64_emit_log_not,
2174 amd64_emit_bit_and,
2175 amd64_emit_bit_or,
2176 amd64_emit_bit_xor,
2177 amd64_emit_bit_not,
2178 amd64_emit_equal,
2179 amd64_emit_less_signed,
2180 amd64_emit_less_unsigned,
2181 amd64_emit_ref,
2182 amd64_emit_if_goto,
2183 amd64_emit_goto,
2184 amd64_write_goto_address,
2185 amd64_emit_const,
2186 amd64_emit_call,
2187 amd64_emit_reg,
2188 amd64_emit_pop,
2189 amd64_emit_stack_flush,
2190 amd64_emit_zero_ext,
2191 amd64_emit_swap,
2192 amd64_emit_stack_adjust,
2193 amd64_emit_int_call_1,
2194 amd64_emit_void_call_2,
2195 amd64_emit_eq_goto,
2196 amd64_emit_ne_goto,
2197 amd64_emit_lt_goto,
2198 amd64_emit_le_goto,
2199 amd64_emit_gt_goto,
2200 amd64_emit_ge_goto
2201 };
2202
2203 #endif /* __x86_64__ */
2204
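/* In the 32-bit expression compiler, the top of the expression
   stack is cached in %eax (low 32 bits) and %ebx (high 32 bits);
   deeper entries live on the machine stack as 8-byte pairs.  The
   prologue sets up a frame and preserves %ebx.  */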
2205 static void
2206 i386_emit_prologue (void)
2207 {
2208 EMIT_ASM32 (i386_prologue,
2209 "push %ebp\n\t"
2210 "mov %esp,%ebp\n\t"
2211 "push %ebx");
2212 /* At this point, the raw regs base address is at 8(%ebp), and the
2213 value pointer is at 12(%ebp). */
2214 }
2215
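/* Store the 64-bit result (%eax low word, %ebx high word) through
   the value pointer passed at 12(%ebp), clear %eax for the return
   value, and restore the saved registers.  */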
2216 static void
2217 i386_emit_epilogue (void)
2218 {
2219 EMIT_ASM32 (i386_epilogue,
2220 "mov 12(%ebp),%ecx\n\t"
2221 "mov %eax,(%ecx)\n\t"
2222 "mov %ebx,0x4(%ecx)\n\t"
2223 "xor %eax,%eax\n\t"
2224 "pop %ebx\n\t"
2225 "pop %ebp\n\t"
2226 "ret");
2227 }
2228
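/* 64-bit add: add the next-to-top entry (on the machine stack)
   into %eax:%ebx with carry, then drop it from the stack.  */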
2229 static void
2230 i386_emit_add (void)
2231 {
2232 EMIT_ASM32 (i386_add,
2233 "add (%esp),%eax\n\t"
2234 "adc 0x4(%esp),%ebx\n\t"
2235 "lea 0x8(%esp),%esp");
2236 }
2237
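/* 64-bit subtract: compute next-to-top minus top in place on the
   machine stack, then pop the result into %eax:%ebx.  */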
2238 static void
2239 i386_emit_sub (void)
2240 {
2241 EMIT_ASM32 (i386_sub,
2242 "subl %eax,(%esp)\n\t"
2243 "sbbl %ebx,4(%esp)\n\t"
2244 "pop %eax\n\t"
2245 "pop %ebx\n\t");
2246 }
2247
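/* 64-bit multiply and shifts are not implemented for i386; these
   emitters just flag emit_error.  */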
2248 static void
2249 i386_emit_mul (void)
2250 {
2251 emit_error = 1;
2252 }
2253
2254 static void
2255 i386_emit_lsh (void)
2256 {
2257 emit_error = 1;
2258 }
2259
2260 static void
2261 i386_emit_rsh_signed (void)
2262 {
2263 emit_error = 1;
2264 }
2265
2266 static void
2267 i386_emit_rsh_unsigned (void)
2268 {
2269 emit_error = 1;
2270 }
2271
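/* Sign-extend the top-of-stack value from ARG bits to the full
   64 bits held in %eax:%ebx.  */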
2272 static void
2273 i386_emit_ext (int arg)
2274 {
2275 switch (arg)
2276 {
2277 case 8:
2278 EMIT_ASM32 (i386_ext_8,
2279 "cbtw\n\t"
2280 "cwtl\n\t"
2281 "movl %eax,%ebx\n\t"
2282 "sarl $31,%ebx");
2283 break;
2284 case 16:
2285 EMIT_ASM32 (i386_ext_16,
2286 "cwtl\n\t"
2287 "movl %eax,%ebx\n\t"
2288 "sarl $31,%ebx");
2289 break;
2290 case 32:
2291 EMIT_ASM32 (i386_ext_32,
2292 "movl %eax,%ebx\n\t"
2293 "sarl $31,%ebx");
2294 break;
2295 default:
2296 emit_error = 1;
2297 }
2298 }
2299
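/* Logical negation: replace the 64-bit top-of-stack value with 1
   if it was zero, and with 0 otherwise.  */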
2300 static void
2301 i386_emit_log_not (void)
2302 {
2303 EMIT_ASM32 (i386_log_not,
2304 "or %ebx,%eax\n\t"
2305 "test %eax,%eax\n\t"
2306 "sete %cl\n\t"
2307 "xor %ebx,%ebx\n\t"
2308 "movzbl %cl,%eax");
2309 }
2310
2311 static void
2312 i386_emit_bit_and (void)
2313 {
2314 EMIT_ASM32 (i386_and,
2315 "and (%esp),%eax\n\t"
2316 "and 0x4(%esp),%ebx\n\t"
2317 "lea 0x8(%esp),%esp");
2318 }
2319
2320 static void
2321 i386_emit_bit_or (void)
2322 {
2323 EMIT_ASM32 (i386_or,
2324 "or (%esp),%eax\n\t"
2325 "or 0x4(%esp),%ebx\n\t"
2326 "lea 0x8(%esp),%esp");
2327 }
2328
2329 static void
2330 i386_emit_bit_xor (void)
2331 {
2332 EMIT_ASM32 (i386_xor,
2333 "xor (%esp),%eax\n\t"
2334 "xor 0x4(%esp),%ebx\n\t"
2335 "lea 0x8(%esp),%esp");
2336 }
2337
2338 static void
2339 i386_emit_bit_not (void)
2340 {
2341 EMIT_ASM32 (i386_bit_not,
2342 "xor $0xffffffff,%eax\n\t"
2343 "xor $0xffffffff,%ebx\n\t");
2344 }
2345
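/* The three comparison emitters below compare the next-to-top
   stack entry against the top (%eax:%ebx), drop that entry, and
   leave 1 or 0 as the new top-of-stack value.  */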
2346 static void
2347 i386_emit_equal (void)
2348 {
2349 EMIT_ASM32 (i386_equal,
2350 "cmpl %ebx,4(%esp)\n\t"
2351 "jne .Li386_equal_false\n\t"
2352 "cmpl %eax,(%esp)\n\t"
2353 "je .Li386_equal_true\n\t"
2354 ".Li386_equal_false:\n\t"
2355 "xor %eax,%eax\n\t"
2356 "jmp .Li386_equal_end\n\t"
2357 ".Li386_equal_true:\n\t"
2358 "mov $1,%eax\n\t"
2359 ".Li386_equal_end:\n\t"
2360 "xor %ebx,%ebx\n\t"
2361 "lea 0x8(%esp),%esp");
2362 }
2363
2364 static void
2365 i386_emit_less_signed (void)
2366 {
2367 EMIT_ASM32 (i386_less_signed,
2368 "cmpl %ebx,4(%esp)\n\t"
2369 "jl .Li386_less_signed_true\n\t"
2370 "jne .Li386_less_signed_false\n\t"
2371 "cmpl %eax,(%esp)\n\t"
2372 "jl .Li386_less_signed_true\n\t"
2373 ".Li386_less_signed_false:\n\t"
2374 "xor %eax,%eax\n\t"
2375 "jmp .Li386_less_signed_end\n\t"
2376 ".Li386_less_signed_true:\n\t"
2377 "mov $1,%eax\n\t"
2378 ".Li386_less_signed_end:\n\t"
2379 "xor %ebx,%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2381 }
2382
2383 static void
2384 i386_emit_less_unsigned (void)
2385 {
2386 EMIT_ASM32 (i386_less_unsigned,
2387 "cmpl %ebx,4(%esp)\n\t"
2388 "jb .Li386_less_unsigned_true\n\t"
2389 "jne .Li386_less_unsigned_false\n\t"
2390 "cmpl %eax,(%esp)\n\t"
2391 "jb .Li386_less_unsigned_true\n\t"
2392 ".Li386_less_unsigned_false:\n\t"
2393 "xor %eax,%eax\n\t"
2394 "jmp .Li386_less_unsigned_end\n\t"
2395 ".Li386_less_unsigned_true:\n\t"
2396 "mov $1,%eax\n\t"
2397 ".Li386_less_unsigned_end:\n\t"
2398 "xor %ebx,%ebx\n\t"
2399 "lea 0x8(%esp),%esp");
2400 }
2401
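/* Dereference the address in %eax, fetching SIZE bytes; 8-byte
   loads also fill %ebx with the high word.  */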
2402 static void
2403 i386_emit_ref (int size)
2404 {
2405 switch (size)
2406 {
2407 case 1:
2408 EMIT_ASM32 (i386_ref1,
2409 "movb (%eax),%al");
2410 break;
2411 case 2:
2412 EMIT_ASM32 (i386_ref2,
2413 "movw (%eax),%ax");
2414 break;
2415 case 4:
2416 EMIT_ASM32 (i386_ref4,
2417 "movl (%eax),%eax");
2418 break;
2419 case 8:
2420 EMIT_ASM32 (i386_ref8,
2421 "movl 4(%eax),%ebx\n\t"
2422 "movl (%eax),%eax");
2423 break;
2424 }
2425 }
2426
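/* Pop the 64-bit condition value and emit a hand-encoded jne that
   is taken when the value is non-zero; the 32-bit displacement at
   *OFFSET_P is patched later through i386_write_goto_address.  */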
2427 static void
2428 i386_emit_if_goto (int *offset_p, int *size_p)
2429 {
2430 EMIT_ASM32 (i386_if_goto,
2431 "mov %eax,%ecx\n\t"
2432 "or %ebx,%ecx\n\t"
2433 "pop %eax\n\t"
2434 "pop %ebx\n\t"
2435 "cmpl $0,%ecx\n\t"
2436 /* Don't trust the assembler to choose the right jump */
2437 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2438
2439 if (offset_p)
2440 *offset_p = 11; /* be sure that this matches the sequence above */
2441 if (size_p)
2442 *size_p = 4;
2443 }
2444
2445 static void
2446 i386_emit_goto (int *offset_p, int *size_p)
2447 {
2448 EMIT_ASM32 (i386_goto,
2449 /* Don't trust the assembler to choose the right jump */
2450 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2451 if (offset_p)
2452 *offset_p = 1;
2453 if (size_p)
2454 *size_p = 4;
2455 }
2456
2457 static void
2458 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2459 {
2460 int diff = (to - (from + size));
2461 unsigned char buf[sizeof (int)];
2462
2463 /* We're only doing 4-byte sizes at the moment. */
2464 if (size != 4)
2465 {
2466 emit_error = 1;
2467 return;
2468 }
2469
2470 memcpy (buf, &diff, sizeof (int));
2471 target_write_memory (from, buf, sizeof (int));
2472 }
2473
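/* Load the 64-bit constant NUM into %eax (low word) and %ebx (high
   word), using the shorter xor form when the high word is zero.  */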
2474 static void
2475 i386_emit_const (LONGEST num)
2476 {
2477 unsigned char buf[16];
2478 int i, hi, lo;
2479 CORE_ADDR buildaddr = current_insn_ptr;
2480
2481 i = 0;
2482 buf[i++] = 0xb8; /* mov $<n>,%eax */
2483 lo = num & 0xffffffff;
2484 memcpy (&buf[i], &lo, sizeof (lo));
2485 i += 4;
2486 hi = ((num >> 32) & 0xffffffff);
2487 if (hi)
2488 {
2489 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2490 memcpy (&buf[i], &hi, sizeof (hi));
2491 i += 4;
2492 }
2493 else
2494 {
2495 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2496 }
2497 append_insns (&buildaddr, i, buf);
2498 current_insn_ptr = buildaddr;
2499 }
2500
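/* Emit a 32-bit relative call to FN from the current instruction
   pointer.  */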
2501 static void
2502 i386_emit_call (CORE_ADDR fn)
2503 {
2504 unsigned char buf[16];
2505 int i, offset;
2506 CORE_ADDR buildaddr;
2507
2508 buildaddr = current_insn_ptr;
2509 i = 0;
2510 buf[i++] = 0xe8; /* call <reladdr> */
2511 offset = ((int) fn) - (buildaddr + 5);
2512 memcpy (buf + 1, &offset, 4);
2513 append_insns (&buildaddr, 5, buf);
2514 current_insn_ptr = buildaddr;
2515 }
2516
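/* Load raw register REG as the new top of stack: call the function
   at get_raw_reg_func_addr () with the regs base from 8(%ebp) and
   REG as arguments, leaving the result in %eax with %ebx cleared.  */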
2517 static void
2518 i386_emit_reg (int reg)
2519 {
2520 unsigned char buf[16];
2521 int i;
2522 CORE_ADDR buildaddr;
2523
2524 EMIT_ASM32 (i386_reg_a,
2525 "sub $0x8,%esp");
2526 buildaddr = current_insn_ptr;
2527 i = 0;
2528 buf[i++] = 0xb8; /* mov $<n>,%eax */
2529 memcpy (&buf[i], &reg, sizeof (reg));
2530 i += 4;
2531 append_insns (&buildaddr, i, buf);
2532 current_insn_ptr = buildaddr;
2533 EMIT_ASM32 (i386_reg_b,
2534 "mov %eax,4(%esp)\n\t"
2535 "mov 8(%ebp),%eax\n\t"
2536 "mov %eax,(%esp)");
2537 i386_emit_call (get_raw_reg_func_addr ());
2538 EMIT_ASM32 (i386_reg_c,
2539 "xor %ebx,%ebx\n\t"
2540 "lea 0x8(%esp),%esp");
2541 }
2542
2543 static void
2544 i386_emit_pop (void)
2545 {
2546 EMIT_ASM32 (i386_pop,
2547 "pop %eax\n\t"
2548 "pop %ebx");
2549 }
2550
2551 static void
2552 i386_emit_stack_flush (void)
2553 {
2554 EMIT_ASM32 (i386_stack_flush,
2555 "push %ebx\n\t"
2556 "push %eax");
2557 }
2558
2559 static void
2560 i386_emit_zero_ext (int arg)
2561 {
2562 switch (arg)
2563 {
2564 case 8:
2565 EMIT_ASM32 (i386_zero_ext_8,
2566 "and $0xff,%eax\n\t"
2567 "xor %ebx,%ebx");
2568 break;
2569 case 16:
2570 EMIT_ASM32 (i386_zero_ext_16,
2571 "and $0xffff,%eax\n\t"
2572 "xor %ebx,%ebx");
2573 break;
2574 case 32:
2575 EMIT_ASM32 (i386_zero_ext_32,
2576 "xor %ebx,%ebx");
2577 break;
2578 default:
2579 emit_error = 1;
2580 }
2581 }
2582
2583 static void
2584 i386_emit_swap (void)
2585 {
2586 EMIT_ASM32 (i386_swap,
2587 "mov %eax,%ecx\n\t"
2588 "mov %ebx,%edx\n\t"
2589 "pop %eax\n\t"
2590 "pop %ebx\n\t"
2591 "push %edx\n\t"
2592 "push %ecx");
2593 }
2594
2595 static void
2596 i386_emit_stack_adjust (int n)
2597 {
2598 unsigned char buf[16];
2599 int i;
2600 CORE_ADDR buildaddr = current_insn_ptr;
2601
2602 i = 0;
2603 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2604 buf[i++] = 0x64;
2605 buf[i++] = 0x24;
2606 buf[i++] = n * 8;
2607 append_insns (&buildaddr, i, buf);
2608 current_insn_ptr = buildaddr;
2609 }
2610
2611 /* FN's prototype is `LONGEST(*fn)(int)'. */
2612
2613 static void
2614 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2615 {
2616 unsigned char buf[16];
2617 int i;
2618 CORE_ADDR buildaddr;
2619
2620 EMIT_ASM32 (i386_int_call_1_a,
2621 /* Reserve a bit of stack space. */
2622 "sub $0x8,%esp");
2623 /* Put the one argument on the stack. */
2624 buildaddr = current_insn_ptr;
2625 i = 0;
2626 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2627 buf[i++] = 0x04;
2628 buf[i++] = 0x24;
2629 memcpy (&buf[i], &arg1, sizeof (arg1));
2630 i += 4;
2631 append_insns (&buildaddr, i, buf);
2632 current_insn_ptr = buildaddr;
2633 i386_emit_call (fn);
2634 EMIT_ASM32 (i386_int_call_1_c,
2635 "mov %edx,%ebx\n\t"
2636 "lea 0x8(%esp),%esp");
2637 }
2638
2639 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2640
2641 static void
2642 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2643 {
2644 unsigned char buf[16];
2645 int i;
2646 CORE_ADDR buildaddr;
2647
2648 EMIT_ASM32 (i386_void_call_2_a,
2649 /* Preserve %eax only; we don't have to worry about %ebx. */
2650 "push %eax\n\t"
2651 /* Reserve a bit of stack space for arguments. */
2652 "sub $0x10,%esp\n\t"
2653 /* Copy "top" to the second argument position. (Note that
2654 we can't assume the function won't scribble on its
2655 arguments, so don't try to restore from this.) */
2656 "mov %eax,4(%esp)\n\t"
2657 "mov %ebx,8(%esp)");
2658 /* Put the first argument on the stack. */
2659 buildaddr = current_insn_ptr;
2660 i = 0;
2661 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2662 buf[i++] = 0x04;
2663 buf[i++] = 0x24;
2664 memcpy (&buf[i], &arg1, sizeof (arg1));
2665 i += 4;
2666 append_insns (&buildaddr, i, buf);
2667 current_insn_ptr = buildaddr;
2668 i386_emit_call (fn);
2669 EMIT_ASM32 (i386_void_call_2_b,
2670 "lea 0x10(%esp),%esp\n\t"
2671 /* Restore original stack top. */
2672 "pop %eax");
2673 }
2674
2675
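/* Conditional-goto emitters for the 64-bit comparisons.  Each
   compares the next-to-top pair on the machine stack against
   %eax:%ebx, pops both operands, and emits a hand-encoded jmp whose
   32-bit displacement (at *OFFSET_P, of size *SIZE_P) is patched
   later through i386_write_goto_address.  */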
2676 static void
2677 i386_emit_eq_goto (int *offset_p, int *size_p)
2678 {
2679 EMIT_ASM32 (eq,
2680 /* Check the low half first; it is more likely to be the decider.  */
2681 "cmpl %eax,(%esp)\n\t"
2682 "jne .Leq_fallthru\n\t"
2683 "cmpl %ebx,4(%esp)\n\t"
2684 "jne .Leq_fallthru\n\t"
2685 "lea 0x8(%esp),%esp\n\t"
2686 "pop %eax\n\t"
2687 "pop %ebx\n\t"
2688 /* jmp, but don't trust the assembler to choose the right jump */
2689 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2690 ".Leq_fallthru:\n\t"
2691 "lea 0x8(%esp),%esp\n\t"
2692 "pop %eax\n\t"
2693 "pop %ebx");
2694
2695 if (offset_p)
2696 *offset_p = 18;
2697 if (size_p)
2698 *size_p = 4;
2699 }
2700
2701 static void
2702 i386_emit_ne_goto (int *offset_p, int *size_p)
2703 {
2704 EMIT_ASM32 (ne,
2705 /* Check the low half first; it is more likely to be the decider.  */
2706 "cmpl %eax,(%esp)\n\t"
2707 "jne .Lne_jump\n\t"
2708 "cmpl %ebx,4(%esp)\n\t"
2709 "je .Lne_fallthru\n\t"
2710 ".Lne_jump:\n\t"
2711 "lea 0x8(%esp),%esp\n\t"
2712 "pop %eax\n\t"
2713 "pop %ebx\n\t"
2714 /* jmp, but don't trust the assembler to choose the right jump */
2715 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2716 ".Lne_fallthru:\n\t"
2717 "lea 0x8(%esp),%esp\n\t"
2718 "pop %eax\n\t"
2719 "pop %ebx");
2720
2721 if (offset_p)
2722 *offset_p = 18;
2723 if (size_p)
2724 *size_p = 4;
2725 }
2726
2727 static void
2728 i386_emit_lt_goto (int *offset_p, int *size_p)
2729 {
2730 EMIT_ASM32 (lt,
2731 "cmpl %ebx,4(%esp)\n\t"
2732 "jl .Llt_jump\n\t"
2733 "jne .Llt_fallthru\n\t"
2734 "cmpl %eax,(%esp)\n\t"
2735 "jnl .Llt_fallthru\n\t"
2736 ".Llt_jump:\n\t"
2737 "lea 0x8(%esp),%esp\n\t"
2738 "pop %eax\n\t"
2739 "pop %ebx\n\t"
2740 /* jmp, but don't trust the assembler to choose the right jump */
2741 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2742 ".Llt_fallthru:\n\t"
2743 "lea 0x8(%esp),%esp\n\t"
2744 "pop %eax\n\t"
2745 "pop %ebx");
2746
2747 if (offset_p)
2748 *offset_p = 20;
2749 if (size_p)
2750 *size_p = 4;
2751 }
2752
2753 static void
2754 i386_emit_le_goto (int *offset_p, int *size_p)
2755 {
2756 EMIT_ASM32 (le,
2757 "cmpl %ebx,4(%esp)\n\t"
2758 "jle .Lle_jump\n\t"
2759 "jne .Lle_fallthru\n\t"
2760 "cmpl %eax,(%esp)\n\t"
2761 "jnle .Lle_fallthru\n\t"
2762 ".Lle_jump:\n\t"
2763 "lea 0x8(%esp),%esp\n\t"
2764 "pop %eax\n\t"
2765 "pop %ebx\n\t"
2766 /* jmp, but don't trust the assembler to choose the right jump */
2767 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2768 ".Lle_fallthru:\n\t"
2769 "lea 0x8(%esp),%esp\n\t"
2770 "pop %eax\n\t"
2771 "pop %ebx");
2772
2773 if (offset_p)
2774 *offset_p = 20;
2775 if (size_p)
2776 *size_p = 4;
2777 }
2778
2779 static void
2780 i386_emit_gt_goto (int *offset_p, int *size_p)
2781 {
2782 EMIT_ASM32 (gt,
2783 "cmpl %ebx,4(%esp)\n\t"
2784 "jg .Lgt_jump\n\t"
2785 "jne .Lgt_fallthru\n\t"
2786 "cmpl %eax,(%esp)\n\t"
2787 "jng .Lgt_fallthru\n\t"
2788 ".Lgt_jump:\n\t"
2789 "lea 0x8(%esp),%esp\n\t"
2790 "pop %eax\n\t"
2791 "pop %ebx\n\t"
2792 /* jmp, but don't trust the assembler to choose the right jump */
2793 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2794 ".Lgt_fallthru:\n\t"
2795 "lea 0x8(%esp),%esp\n\t"
2796 "pop %eax\n\t"
2797 "pop %ebx");
2798
2799 if (offset_p)
2800 *offset_p = 20;
2801 if (size_p)
2802 *size_p = 4;
2803 }
2804
2805 static void
2806 i386_emit_ge_goto (int *offset_p, int *size_p)
2807 {
2808 EMIT_ASM32 (ge,
2809 "cmpl %ebx,4(%esp)\n\t"
2810 "jge .Lge_jump\n\t"
2811 "jne .Lge_fallthru\n\t"
2812 "cmpl %eax,(%esp)\n\t"
2813 "jnge .Lge_fallthru\n\t"
2814 ".Lge_jump:\n\t"
2815 "lea 0x8(%esp),%esp\n\t"
2816 "pop %eax\n\t"
2817 "pop %ebx\n\t"
2818 /* jmp, but don't trust the assembler to choose the right jump */
2819 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2820 ".Lge_fallthru:\n\t"
2821 "lea 0x8(%esp),%esp\n\t"
2822 "pop %eax\n\t"
2823 "pop %ebx");
2824
2825 if (offset_p)
2826 *offset_p = 20;
2827 if (size_p)
2828 *size_p = 4;
2829 }
2830
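/* The emit_ops vector returned by x86_emit_ops for 32-bit
   inferiors.  */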
2831 struct emit_ops i386_emit_ops =
2832 {
2833 i386_emit_prologue,
2834 i386_emit_epilogue,
2835 i386_emit_add,
2836 i386_emit_sub,
2837 i386_emit_mul,
2838 i386_emit_lsh,
2839 i386_emit_rsh_signed,
2840 i386_emit_rsh_unsigned,
2841 i386_emit_ext,
2842 i386_emit_log_not,
2843 i386_emit_bit_and,
2844 i386_emit_bit_or,
2845 i386_emit_bit_xor,
2846 i386_emit_bit_not,
2847 i386_emit_equal,
2848 i386_emit_less_signed,
2849 i386_emit_less_unsigned,
2850 i386_emit_ref,
2851 i386_emit_if_goto,
2852 i386_emit_goto,
2853 i386_write_goto_address,
2854 i386_emit_const,
2855 i386_emit_call,
2856 i386_emit_reg,
2857 i386_emit_pop,
2858 i386_emit_stack_flush,
2859 i386_emit_zero_ext,
2860 i386_emit_swap,
2861 i386_emit_stack_adjust,
2862 i386_emit_int_call_1,
2863 i386_emit_void_call_2,
2864 i386_emit_eq_goto,
2865 i386_emit_ne_goto,
2866 i386_emit_lt_goto,
2867 i386_emit_le_goto,
2868 i386_emit_gt_goto,
2869 i386_emit_ge_goto
2870 };
2871
2872
2873 static struct emit_ops *
2874 x86_emit_ops (void)
2875 {
2876 #ifdef __x86_64__
2877 if (is_64bit_tdesc ())
2878 return &amd64_emit_ops;
2879 else
2880 #endif
2881 return &i386_emit_ops;
2882 }
2883
2884 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2885
2886 const gdb_byte *
2887 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2888 {
2889 *size = x86_breakpoint_len;
2890 return x86_breakpoint;
2891 }
2892
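/* Implementation of linux_target_ops method "supports_range_stepping".  */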
2893 static int
2894 x86_supports_range_stepping (void)
2895 {
2896 return 1;
2897 }
2898
2899 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2900 */
2901
2902 static int
2903 x86_supports_hardware_single_step (void)
2904 {
2905 return 1;
2906 }
2907
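/* Return the index identifying the target description the
   in-process agent should use for the current inferior.  */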
2908 static int
2909 x86_get_ipa_tdesc_idx (void)
2910 {
2911 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2912 const struct target_desc *tdesc = regcache->tdesc;
2913
2914 #ifdef __x86_64__
2915 return amd64_get_ipa_tdesc_idx (tdesc);
2916 #endif
2917
2918 if (tdesc == tdesc_i386_linux_no_xml)
2919 return X86_TDESC_SSE;
2920
2921 return i386_get_ipa_tdesc_idx (tdesc);
2922 }
2923
2924 /* This is initialized assuming an amd64 target.
2925 x86_arch_setup will correct it for i386 or amd64 targets. */
2926
2927 struct linux_target_ops the_low_target =
2928 {
2929 /* need to fix up i386 siginfo if host is amd64 */
2930 x86_siginfo_fixup,
2931 x86_linux_new_process,
2932 x86_linux_delete_process,
2933 x86_linux_new_thread,
2934 x86_linux_delete_thread,
2935 x86_linux_new_fork,
2936 x86_linux_prepare_to_resume,
2937 x86_linux_process_qsupported,
2938 x86_supports_tracepoints,
2939 x86_get_thread_area,
2940 x86_install_fast_tracepoint_jump_pad,
2941 x86_emit_ops,
2942 x86_get_min_fast_tracepoint_insn_len,
2943 x86_supports_range_stepping,
2944 x86_supports_hardware_single_step,
2945 x86_get_syscall_trapinfo,
2946 x86_get_ipa_tdesc_idx,
2947 };
2948
2949 /* The linux target ops object. */
2950
2951 linux_process_target *the_linux_target = &the_x86_target;
2952
2953 void
2954 initialize_low_arch (void)
2955 {
2956 /* Initialize the Linux target descriptions. */
2957 #ifdef __x86_64__
2958 tdesc_amd64_linux_no_xml = allocate_target_description ();
2959 copy_target_description (tdesc_amd64_linux_no_xml,
2960 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2961 false));
2962 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2963 #endif
2964
2965 tdesc_i386_linux_no_xml = allocate_target_description ();
2966 copy_target_description (tdesc_i386_linux_no_xml,
2967 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2968 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2969
2970 initialize_regsets_info (&x86_regsets_info);
2971 }