gdbserver/linux-low: start turning linux target ops into methods
gdbserver/linux-x86-low.cc
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 };
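/* Illustrative sketch only (an assumption about where this series is
   headed, not code from this commit): free functions such as
   x86_arch_setup are expected to migrate into this class as method
   overrides.  A hypothetical example, using the 'low_arch_setup' name
   from the comment above:

     protected:
       void low_arch_setup () override
       { current_process ()->tdesc = x86_linux_read_description (); }

   The class body is intentionally empty at this stage.  */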
104
105 /* The singleton target ops object. */
106
107 static x86_target the_x86_target;
108
109 /* Per-process arch-specific data we want to keep. */
110
111 struct arch_process_info
112 {
113 struct x86_debug_reg_state debug_reg_state;
114 };
115
116 #ifdef __x86_64__
117
118 /* Mapping between the general-purpose registers in `struct user'
119 format and GDB's register array layout.
120 Note that the transfer layout uses 64-bit regs. */
121 static /*const*/ int i386_regmap[] =
122 {
123 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
124 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
125 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
126 DS * 8, ES * 8, FS * 8, GS * 8
127 };
128
129 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
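/* For illustration (a sketch, not part of the original source): each
   regmap entry is the byte offset of that register inside the 64-bit
   `struct user' area, so collecting GDB register 0 (%eax in the i386
   view) amounts to:

     collect_register (regcache, 0, (char *) buf + i386_regmap[0]);

   which is exactly the loop used in x86_fill_gregset below.  */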
130
131 /* So that the code below doesn't have to care whether it's i386 or amd64. */
132 #define ORIG_EAX ORIG_RAX
133 #define REGSIZE 8
134
135 static const int x86_64_regmap[] =
136 {
137 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
138 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
139 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
140 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
141 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
142 DS * 8, ES * 8, FS * 8, GS * 8,
143 -1, -1, -1, -1, -1, -1, -1, -1,
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1,
146 -1,
147 -1, -1, -1, -1, -1, -1, -1, -1,
148 ORIG_RAX * 8,
149 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
150 21 * 8, 22 * 8,
151 #else
152 -1, -1,
153 #endif
154 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
155 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
156 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
157 -1, -1, -1, -1, -1, -1, -1, -1,
158 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
159 -1, -1, -1, -1, -1, -1, -1, -1,
160 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
161 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
162 -1, -1, -1, -1, -1, -1, -1, -1,
163 -1, -1, -1, -1, -1, -1, -1, -1,
164 -1, -1, -1, -1, -1, -1, -1, -1,
165 -1 /* pkru */
166 };
167
168 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
169 #define X86_64_USER_REGS (GS + 1)
170
171 #else /* ! __x86_64__ */
172
173 /* Mapping between the general-purpose registers in `struct user'
174 format and GDB's register array layout. */
175 static /*const*/ int i386_regmap[] =
176 {
177 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
178 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
179 EIP * 4, EFL * 4, CS * 4, SS * 4,
180 DS * 4, ES * 4, FS * 4, GS * 4
181 };
182
183 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
184
185 #define REGSIZE 4
186
187 #endif
188
189 #ifdef __x86_64__
190
191 /* Returns true if the current inferior belongs to an x86-64 process,
192    as determined by the tdesc. */
193
194 static int
195 is_64bit_tdesc (void)
196 {
197 struct regcache *regcache = get_thread_regcache (current_thread, 0);
198
199 return register_size (regcache->tdesc, 0) == 8;
200 }
201
202 #endif
203
204 \f
205 /* Called by libthread_db. */
206
207 ps_err_e
208 ps_get_thread_area (struct ps_prochandle *ph,
209 lwpid_t lwpid, int idx, void **base)
210 {
211 #ifdef __x86_64__
212 int use_64bit = is_64bit_tdesc ();
213
214 if (use_64bit)
215 {
216 switch (idx)
217 {
218 case FS:
219 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
220 return PS_OK;
221 break;
222 case GS:
223 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
224 return PS_OK;
225 break;
226 default:
227 return PS_BADADDR;
228 }
229 return PS_ERR;
230 }
231 #endif
232
233 {
234 unsigned int desc[4];
235
236 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
237 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
238 return PS_ERR;
239
240 /* Ensure we properly extend the value to 64-bits for x86_64. */
241 *base = (void *) (uintptr_t) desc[1];
242 return PS_OK;
243 }
244 }
245
246 /* Get the thread area address. This is used to recognize which
247 thread is which when tracing with the in-process agent library. We
248 don't read anything from the address, and treat it as opaque; it's
249 the address itself that we assume is unique per-thread. */
250
251 static int
252 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
253 {
254 #ifdef __x86_64__
255 int use_64bit = is_64bit_tdesc ();
256
257 if (use_64bit)
258 {
259 void *base;
260 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
261 {
262 *addr = (CORE_ADDR) (uintptr_t) base;
263 return 0;
264 }
265
266 return -1;
267 }
268 #endif
269
270 {
271 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
272 struct thread_info *thr = get_lwp_thread (lwp);
273 struct regcache *regcache = get_thread_regcache (thr, 1);
274 unsigned int desc[4];
275 ULONGEST gs = 0;
276     const int reg_thread_area = 3; /* Number of bits to shift the GS selector to get its GDT index. */
277 int idx;
278
279 collect_register_by_name (regcache, "gs", &gs);
280
281 idx = gs >> reg_thread_area;
282
283 if (ptrace (PTRACE_GET_THREAD_AREA,
284 lwpid_of (thr),
285 (void *) (long) idx, (unsigned long) &desc) < 0)
286 return -1;
287
288 *addr = desc[1];
289 return 0;
290 }
291 }
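/* Worked example (illustrative, using a typical 32-bit Linux value):
   if the gs selector reads back as 0x33, then idx = 0x33 >> 3 = 6,
   i.e. GDT entry 6, and PTRACE_GET_THREAD_AREA fills DESC with that
   entry's descriptor, whose second word (desc[1]) is the segment base
   used as the thread area address.  */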
292
293
294 \f
295 static int
296 x86_cannot_store_register (int regno)
297 {
298 #ifdef __x86_64__
299 if (is_64bit_tdesc ())
300 return 0;
301 #endif
302
303 return regno >= I386_NUM_REGS;
304 }
305
306 static int
307 x86_cannot_fetch_register (int regno)
308 {
309 #ifdef __x86_64__
310 if (is_64bit_tdesc ())
311 return 0;
312 #endif
313
314 return regno >= I386_NUM_REGS;
315 }
316
317 static void
318 x86_fill_gregset (struct regcache *regcache, void *buf)
319 {
320 int i;
321
322 #ifdef __x86_64__
323 if (register_size (regcache->tdesc, 0) == 8)
324 {
325 for (i = 0; i < X86_64_NUM_REGS; i++)
326 if (x86_64_regmap[i] != -1)
327 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
328
329 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
330 {
331 unsigned long base;
332 int lwpid = lwpid_of (current_thread);
333
334 collect_register_by_name (regcache, "fs_base", &base);
335 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
336
337 collect_register_by_name (regcache, "gs_base", &base);
338 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
339 }
340 #endif
341
342 return;
343 }
344
345 /* 32-bit inferior registers need to be zero-extended.
346 Callers would read uninitialized memory otherwise. */
347 memset (buf, 0x00, X86_64_USER_REGS * 8);
348 #endif
349
350 for (i = 0; i < I386_NUM_REGS; i++)
351 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
352
353 collect_register_by_name (regcache, "orig_eax",
354 ((char *) buf) + ORIG_EAX * REGSIZE);
355
356 #ifdef __x86_64__
357 /* Sign extend EAX value to avoid potential syscall restart
358 problems.
359
360 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
361 for a detailed explanation. */
362 if (register_size (regcache->tdesc, 0) == 4)
363 {
364 void *ptr = ((gdb_byte *) buf
365 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
366
367 *(int64_t *) ptr = *(int32_t *) ptr;
368 }
369 #endif
370 }
371
372 static void
373 x86_store_gregset (struct regcache *regcache, const void *buf)
374 {
375 int i;
376
377 #ifdef __x86_64__
378 if (register_size (regcache->tdesc, 0) == 8)
379 {
380 for (i = 0; i < X86_64_NUM_REGS; i++)
381 if (x86_64_regmap[i] != -1)
382 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
383
384 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
385 {
386 unsigned long base;
387 int lwpid = lwpid_of (current_thread);
388
389 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
390 supply_register_by_name (regcache, "fs_base", &base);
391
392 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
393 supply_register_by_name (regcache, "gs_base", &base);
394 }
395 #endif
396 return;
397 }
398 #endif
399
400 for (i = 0; i < I386_NUM_REGS; i++)
401 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
402
403 supply_register_by_name (regcache, "orig_eax",
404 ((char *) buf) + ORIG_EAX * REGSIZE);
405 }
406
407 static void
408 x86_fill_fpregset (struct regcache *regcache, void *buf)
409 {
410 #ifdef __x86_64__
411 i387_cache_to_fxsave (regcache, buf);
412 #else
413 i387_cache_to_fsave (regcache, buf);
414 #endif
415 }
416
417 static void
418 x86_store_fpregset (struct regcache *regcache, const void *buf)
419 {
420 #ifdef __x86_64__
421 i387_fxsave_to_cache (regcache, buf);
422 #else
423 i387_fsave_to_cache (regcache, buf);
424 #endif
425 }
426
427 #ifndef __x86_64__
428
429 static void
430 x86_fill_fpxregset (struct regcache *regcache, void *buf)
431 {
432 i387_cache_to_fxsave (regcache, buf);
433 }
434
435 static void
436 x86_store_fpxregset (struct regcache *regcache, const void *buf)
437 {
438 i387_fxsave_to_cache (regcache, buf);
439 }
440
441 #endif
442
443 static void
444 x86_fill_xstateregset (struct regcache *regcache, void *buf)
445 {
446 i387_cache_to_xsave (regcache, buf);
447 }
448
449 static void
450 x86_store_xstateregset (struct regcache *regcache, const void *buf)
451 {
452 i387_xsave_to_cache (regcache, buf);
453 }
454
455 /* ??? The non-biarch i386 case stores all the i387 regs twice.
456    Once in i387_.*fsave.* and once in i387_.*fxsave.*.
457    This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
458    doesn't work.  It would be nice to avoid the duplication in the case
459    where it does work.  Maybe the arch_setup routine could check whether it
460    works and update the supported regsets accordingly; see the sketch after the regset table below. */
461
462 static struct regset_info x86_regsets[] =
463 {
464 #ifdef HAVE_PTRACE_GETREGS
465 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
466 GENERAL_REGS,
467 x86_fill_gregset, x86_store_gregset },
468 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
469 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
470 # ifndef __x86_64__
471 # ifdef HAVE_PTRACE_GETFPXREGS
472 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
473 EXTENDED_REGS,
474 x86_fill_fpxregset, x86_store_fpxregset },
475 # endif
476 # endif
477 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
478 FP_REGS,
479 x86_fill_fpregset, x86_store_fpregset },
480 #endif /* HAVE_PTRACE_GETREGS */
481 NULL_REGSET
482 };
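/* A minimal sketch of the idea floated in the comment above (an
   assumption, not implemented here): after probing whether
   PTRACE_GETFPXREGS works, the arch_setup path could shrink the plain
   FP regset so the i387 registers are only transferred once, the same
   way x86_linux_read_description disables the legacy regsets once
   PTRACE_GETREGSET is known to work.  */
#if 0
static void
example_disable_duplicate_fp_regset (void)
{
  if (have_ptrace_getfpxregs == 1)
    for (struct regset_info *regset = x86_regsets;
	 regset->fill_function != NULL;
	 regset++)
      if (regset->get_request == PTRACE_GETFPREGS)
	regset->size = 0;	/* A zero size disables the regset.  */
}
#endif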
483
484 static CORE_ADDR
485 x86_get_pc (struct regcache *regcache)
486 {
487 int use_64bit = register_size (regcache->tdesc, 0) == 8;
488
489 if (use_64bit)
490 {
491 uint64_t pc;
492
493 collect_register_by_name (regcache, "rip", &pc);
494 return (CORE_ADDR) pc;
495 }
496 else
497 {
498 uint32_t pc;
499
500 collect_register_by_name (regcache, "eip", &pc);
501 return (CORE_ADDR) pc;
502 }
503 }
504
505 static void
506 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
507 {
508 int use_64bit = register_size (regcache->tdesc, 0) == 8;
509
510 if (use_64bit)
511 {
512 uint64_t newpc = pc;
513
514 supply_register_by_name (regcache, "rip", &newpc);
515 }
516 else
517 {
518 uint32_t newpc = pc;
519
520 supply_register_by_name (regcache, "eip", &newpc);
521 }
522 }
523 \f
524 static const gdb_byte x86_breakpoint[] = { 0xCC };
525 #define x86_breakpoint_len 1
526
527 static int
528 x86_breakpoint_at (CORE_ADDR pc)
529 {
530 unsigned char c;
531
532 the_target->read_memory (pc, &c, 1);
533 if (c == 0xCC)
534 return 1;
535
536 return 0;
537 }
538 \f
539 /* Low-level function vector. */
540 struct x86_dr_low_type x86_dr_low =
541 {
542 x86_linux_dr_set_control,
543 x86_linux_dr_set_addr,
544 x86_linux_dr_get_addr,
545 x86_linux_dr_get_status,
546 x86_linux_dr_get_control,
547 sizeof (void *),
548 };
549 \f
550 /* Breakpoint/Watchpoint support. */
551
552 static int
553 x86_supports_z_point_type (char z_type)
554 {
555 switch (z_type)
556 {
557 case Z_PACKET_SW_BP:
558 case Z_PACKET_HW_BP:
559 case Z_PACKET_WRITE_WP:
560 case Z_PACKET_ACCESS_WP:
561 return 1;
562 default:
563 return 0;
564 }
565 }
566
567 static int
568 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
569 int size, struct raw_breakpoint *bp)
570 {
571 struct process_info *proc = current_process ();
572
573 switch (type)
574 {
575 case raw_bkpt_type_hw:
576 case raw_bkpt_type_write_wp:
577 case raw_bkpt_type_access_wp:
578 {
579 enum target_hw_bp_type hw_type
580 = raw_bkpt_type_to_target_hw_bp_type (type);
581 struct x86_debug_reg_state *state
582 = &proc->priv->arch_private->debug_reg_state;
583
584 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
585 }
586
587 default:
588 /* Unsupported. */
589 return 1;
590 }
591 }
592
593 static int
594 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
595 int size, struct raw_breakpoint *bp)
596 {
597 struct process_info *proc = current_process ();
598
599 switch (type)
600 {
601 case raw_bkpt_type_hw:
602 case raw_bkpt_type_write_wp:
603 case raw_bkpt_type_access_wp:
604 {
605 enum target_hw_bp_type hw_type
606 = raw_bkpt_type_to_target_hw_bp_type (type);
607 struct x86_debug_reg_state *state
608 = &proc->priv->arch_private->debug_reg_state;
609
610 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
611 }
612 default:
613 /* Unsupported. */
614 return 1;
615 }
616 }
617
618 static int
619 x86_stopped_by_watchpoint (void)
620 {
621 struct process_info *proc = current_process ();
622 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
623 }
624
625 static CORE_ADDR
626 x86_stopped_data_address (void)
627 {
628 struct process_info *proc = current_process ();
629 CORE_ADDR addr;
630 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
631 &addr))
632 return addr;
633 return 0;
634 }
635 \f
636 /* Called when a new process is created. */
637
638 static struct arch_process_info *
639 x86_linux_new_process (void)
640 {
641 struct arch_process_info *info = XCNEW (struct arch_process_info);
642
643 x86_low_init_dregs (&info->debug_reg_state);
644
645 return info;
646 }
647
648 /* Called when a process is being deleted. */
649
650 static void
651 x86_linux_delete_process (struct arch_process_info *info)
652 {
653 xfree (info);
654 }
655
656 /* Target routine for linux_new_fork. */
657
658 static void
659 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
660 {
661 /* These are allocated by linux_add_process. */
662 gdb_assert (parent->priv != NULL
663 && parent->priv->arch_private != NULL);
664 gdb_assert (child->priv != NULL
665 && child->priv->arch_private != NULL);
666
667 /* Linux kernel before 2.6.33 commit
668 72f674d203cd230426437cdcf7dd6f681dad8b0d
669 will inherit hardware debug registers from parent
670 on fork/vfork/clone. Newer Linux kernels create such tasks with
671 zeroed debug registers.
672
673 GDB core assumes the child inherits the watchpoints/hw
674 breakpoints of the parent, and will remove them all from the
675 forked off process. Copy the debug registers mirrors into the
676 new process so that all breakpoints and watchpoints can be
677 removed together. The debug registers mirror will become zeroed
678 in the end before detaching the forked off process, thus making
679 this compatible with older Linux kernels too. */
680
681 *child->priv->arch_private = *parent->priv->arch_private;
682 }
683
684 /* See nat/x86-dregs.h. */
685
686 struct x86_debug_reg_state *
687 x86_debug_reg_state (pid_t pid)
688 {
689 struct process_info *proc = find_process_pid (pid);
690
691 return &proc->priv->arch_private->debug_reg_state;
692 }
693 \f
694 /* When GDBSERVER is built as a 64-bit application on linux, the
695 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
696 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
697 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
698 conversion in-place ourselves. */
699
700 /* Convert a ptrace/host siginfo object into/from the siginfo in the
701    layout of the inferior's architecture.  Returns true if any
702 conversion was done; false otherwise. If DIRECTION is 1, then copy
703 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
704 INF. */
705
706 static int
707 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
708 {
709 #ifdef __x86_64__
710 unsigned int machine;
711 int tid = lwpid_of (current_thread);
712 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
713
714 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
715 if (!is_64bit_tdesc ())
716 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
717 FIXUP_32);
718 /* No fixup for native x32 GDB. */
719 else if (!is_elf64 && sizeof (void *) == 8)
720 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
721 FIXUP_X32);
722 #endif
723
724 return 0;
725 }
726 \f
727 static int use_xml;
728
729 /* Format of XSAVE extended state is:
730 struct
731 {
732 fxsave_bytes[0..463]
733 sw_usable_bytes[464..511]
734 xstate_hdr_bytes[512..575]
735 avx_bytes[576..831]
736 future_state etc
737 };
738
739 Same memory layout will be used for the coredump NT_X86_XSTATE
740 representing the XSAVE extended state registers.
741
742    The first 8 bytes of the sw_usable_bytes[464..511] are the OS-enabled
743 extended state mask, which is the same as the extended control register
744 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
745 together with the mask saved in the xstate_hdr_bytes to determine what
746 states the processor/OS supports and what state, used or initialized,
747 the process/thread is in. */
748 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
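/* Purely illustrative (a sketch; the real extraction happens in
   x86_linux_read_description below): given a raw XSAVE/xstate buffer,
   the enabled-feature mask can be read directly from the sw_usable
   area at the offset defined above:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof xcr0);

   where xsave_buf is assumed to point at the start of the XSAVE
   block.  */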
749
750 /* Does the current host support the GETFPXREGS request? The header
751 file may or may not define it, and even if it is defined, the
752 kernel will return EIO if it's running on a pre-SSE processor. */
753 int have_ptrace_getfpxregs =
754 #ifdef HAVE_PTRACE_GETFPXREGS
755 -1
756 #else
757 0
758 #endif
759 ;
760
761 /* Get the Linux/x86 target description from the running target. */
762
763 static const struct target_desc *
764 x86_linux_read_description (void)
765 {
766 unsigned int machine;
767 int is_elf64;
768 int xcr0_features;
769 int tid;
770 static uint64_t xcr0;
771 struct regset_info *regset;
772
773 tid = lwpid_of (current_thread);
774
775 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
776
777 if (sizeof (void *) == 4)
778 {
779 if (is_elf64 > 0)
780 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
781 #ifndef __x86_64__
782 else if (machine == EM_X86_64)
783 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
784 #endif
785 }
786
787 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
788 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
789 {
790 elf_fpxregset_t fpxregs;
791
792 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
793 {
794 have_ptrace_getfpxregs = 0;
795 have_ptrace_getregset = 0;
796 return i386_linux_read_description (X86_XSTATE_X87);
797 }
798 else
799 have_ptrace_getfpxregs = 1;
800 }
801 #endif
802
803 if (!use_xml)
804 {
805 x86_xcr0 = X86_XSTATE_SSE_MASK;
806
807 /* Don't use XML. */
808 #ifdef __x86_64__
809 if (machine == EM_X86_64)
810 return tdesc_amd64_linux_no_xml;
811 else
812 #endif
813 return tdesc_i386_linux_no_xml;
814 }
815
816 if (have_ptrace_getregset == -1)
817 {
818 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
819 struct iovec iov;
820
821 iov.iov_base = xstateregs;
822 iov.iov_len = sizeof (xstateregs);
823
824 /* Check if PTRACE_GETREGSET works. */
825 if (ptrace (PTRACE_GETREGSET, tid,
826 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
827 have_ptrace_getregset = 0;
828 else
829 {
830 have_ptrace_getregset = 1;
831
832 /* Get XCR0 from XSAVE extended state. */
833 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
834 / sizeof (uint64_t))];
835
836 /* Use PTRACE_GETREGSET if it is available. */
837 for (regset = x86_regsets;
838 regset->fill_function != NULL; regset++)
839 if (regset->get_request == PTRACE_GETREGSET)
840 regset->size = X86_XSTATE_SIZE (xcr0);
841 else if (regset->type != GENERAL_REGS)
842 regset->size = 0;
843 }
844 }
845
846 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
847 xcr0_features = (have_ptrace_getregset
848 && (xcr0 & X86_XSTATE_ALL_MASK));
849
850 if (xcr0_features)
851 x86_xcr0 = xcr0;
852
853 if (machine == EM_X86_64)
854 {
855 #ifdef __x86_64__
856 const target_desc *tdesc = NULL;
857
858 if (xcr0_features)
859 {
860 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
861 !is_elf64);
862 }
863
864 if (tdesc == NULL)
865 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
866 return tdesc;
867 #endif
868 }
869 else
870 {
871 const target_desc *tdesc = NULL;
872
873 if (xcr0_features)
874 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
875
876 if (tdesc == NULL)
877 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
878
879 return tdesc;
880 }
881
882 gdb_assert_not_reached ("failed to return tdesc");
883 }
884
885 /* Update the target description of all processes; a new GDB has
886    connected, and it may or may not support XML target descriptions. */
887
888 static void
889 x86_linux_update_xmltarget (void)
890 {
891 struct thread_info *saved_thread = current_thread;
892
893 /* Before changing the register cache's internal layout, flush the
894 contents of the current valid caches back to the threads, and
895 release the current regcache objects. */
896 regcache_release ();
897
898 for_each_process ([] (process_info *proc) {
899 int pid = proc->pid;
900
901 /* Look up any thread of this process. */
902 current_thread = find_any_thread_of_pid (pid);
903
904 the_low_target.arch_setup ();
905 });
906
907 current_thread = saved_thread;
908 }
909
910 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
911 PTRACE_GETREGSET. */
912
913 static void
914 x86_linux_process_qsupported (char **features, int count)
915 {
916 int i;
917
918 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
919 with "i386" in qSupported query, it supports x86 XML target
920 descriptions. */
921 use_xml = 0;
922 for (i = 0; i < count; i++)
923 {
924 const char *feature = features[i];
925
926 if (startswith (feature, "xmlRegisters="))
927 {
928 char *copy = xstrdup (feature + 13);
929
930 char *saveptr;
931 for (char *p = strtok_r (copy, ",", &saveptr);
932 p != NULL;
933 p = strtok_r (NULL, ",", &saveptr))
934 {
935 if (strcmp (p, "i386") == 0)
936 {
937 use_xml = 1;
938 break;
939 }
940 }
941
942 free (copy);
943 }
944 }
945 x86_linux_update_xmltarget ();
946 }
947
948 /* Common for x86/x86-64. */
949
950 static struct regsets_info x86_regsets_info =
951 {
952 x86_regsets, /* regsets */
953 0, /* num_regsets */
954 NULL, /* disabled_regsets */
955 };
956
957 #ifdef __x86_64__
958 static struct regs_info amd64_linux_regs_info =
959 {
960 NULL, /* regset_bitmap */
961 NULL, /* usrregs_info */
962 &x86_regsets_info
963 };
964 #endif
965 static struct usrregs_info i386_linux_usrregs_info =
966 {
967 I386_NUM_REGS,
968 i386_regmap,
969 };
970
971 static struct regs_info i386_linux_regs_info =
972 {
973 NULL, /* regset_bitmap */
974 &i386_linux_usrregs_info,
975 &x86_regsets_info
976 };
977
978 static const struct regs_info *
979 x86_linux_regs_info (void)
980 {
981 #ifdef __x86_64__
982 if (is_64bit_tdesc ())
983 return &amd64_linux_regs_info;
984 else
985 #endif
986 return &i386_linux_regs_info;
987 }
988
989 /* Initialize the target description for the architecture of the
990 inferior. */
991
992 static void
993 x86_arch_setup (void)
994 {
995 current_process ()->tdesc = x86_linux_read_description ();
996 }
997
998 /* Fill *SYSNO with the number of the syscall that the LWP was trapped
999    on.  This should only be called if the LWP got a SYSCALL_SIGTRAP. */
1000
1001 static void
1002 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1003 {
1004 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1005
1006 if (use_64bit)
1007 {
1008 long l_sysno;
1009
1010 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1011 *sysno = (int) l_sysno;
1012 }
1013 else
1014 collect_register_by_name (regcache, "orig_eax", sysno);
1015 }
1016
1017 static int
1018 x86_supports_tracepoints (void)
1019 {
1020 return 1;
1021 }
1022
1023 static void
1024 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1025 {
1026 target_write_memory (*to, buf, len);
1027 *to += len;
1028 }
1029
1030 static int
1031 push_opcode (unsigned char *buf, const char *op)
1032 {
1033 unsigned char *buf_org = buf;
1034
1035 while (1)
1036 {
1037 char *endptr;
1038 unsigned long ul = strtoul (op, &endptr, 16);
1039
1040 if (endptr == op)
1041 break;
1042
1043 *buf++ = ul;
1044 op = endptr;
1045 }
1046
1047 return buf - buf_org;
1048 }
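/* Usage note (illustrative): push_opcode parses a string of
   hexadecimal byte values and appends them to BUF, returning the
   number of bytes emitted.  For example

     i += push_opcode (&buf[i], "48 89 e6");	-- mov %rsp,%rsi, 3 bytes

   which is how the jump-pad builders below spell out fixed
   instruction sequences.  */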
1049
1050 #ifdef __x86_64__
1051
1052 /* Build a jump pad that saves registers and calls a collection
1053    function.  Writes to JJUMPAD_INSN the jump instruction that leads
1054    to the jump pad; the caller is responsible for writing it in at
1055    the tracepoint address. */
1056
1057 static int
1058 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1059 CORE_ADDR collector,
1060 CORE_ADDR lockaddr,
1061 ULONGEST orig_size,
1062 CORE_ADDR *jump_entry,
1063 CORE_ADDR *trampoline,
1064 ULONGEST *trampoline_size,
1065 unsigned char *jjump_pad_insn,
1066 ULONGEST *jjump_pad_insn_size,
1067 CORE_ADDR *adjusted_insn_addr,
1068 CORE_ADDR *adjusted_insn_addr_end,
1069 char *err)
1070 {
1071 unsigned char buf[40];
1072 int i, offset;
1073 int64_t loffset;
1074
1075 CORE_ADDR buildaddr = *jump_entry;
1076
1077 /* Build the jump pad. */
1078
1079 /* First, do tracepoint data collection. Save registers. */
1080 i = 0;
1081 /* Need to ensure stack pointer saved first. */
1082 buf[i++] = 0x54; /* push %rsp */
1083 buf[i++] = 0x55; /* push %rbp */
1084 buf[i++] = 0x57; /* push %rdi */
1085 buf[i++] = 0x56; /* push %rsi */
1086 buf[i++] = 0x52; /* push %rdx */
1087 buf[i++] = 0x51; /* push %rcx */
1088 buf[i++] = 0x53; /* push %rbx */
1089 buf[i++] = 0x50; /* push %rax */
1090 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1091 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1092 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1093 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1094 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1095 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1096 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1097 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1098 buf[i++] = 0x9c; /* pushfq */
1099 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1100 buf[i++] = 0xbf;
1101 memcpy (buf + i, &tpaddr, 8);
1102 i += 8;
1103 buf[i++] = 0x57; /* push %rdi */
1104 append_insns (&buildaddr, i, buf);
1105
1106 /* Stack space for the collecting_t object. */
1107 i = 0;
1108 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1109 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1110 memcpy (buf + i, &tpoint, 8);
1111 i += 8;
1112 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1113 i += push_opcode (&buf[i],
1114 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1115 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1116 append_insns (&buildaddr, i, buf);
1117
1118 /* spin-lock. */
1119 i = 0;
1120   i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1121 memcpy (&buf[i], (void *) &lockaddr, 8);
1122 i += 8;
1123 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1124 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1125 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1126 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1127 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1128 append_insns (&buildaddr, i, buf);
1129
1130 /* Set up the gdb_collect call. */
1131 /* At this point, (stack pointer + 0x18) is the base of our saved
1132 register block. */
1133
1134 i = 0;
1135 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1136 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1137
1138 /* tpoint address may be 64-bit wide. */
1139   i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1140 memcpy (buf + i, &tpoint, 8);
1141 i += 8;
1142 append_insns (&buildaddr, i, buf);
1143
1144   /* The collector function, being in the shared library, may be more
1145      than 31 bits away from the jump pad. */
1146 i = 0;
1147 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1148 memcpy (buf + i, &collector, 8);
1149 i += 8;
1150 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1151 append_insns (&buildaddr, i, buf);
1152
1153 /* Clear the spin-lock. */
1154 i = 0;
1155 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1156 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1157 memcpy (buf + i, &lockaddr, 8);
1158 i += 8;
1159 append_insns (&buildaddr, i, buf);
1160
1161 /* Remove stack that had been used for the collect_t object. */
1162 i = 0;
1163 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1164 append_insns (&buildaddr, i, buf);
1165
1166 /* Restore register state. */
1167 i = 0;
1168 buf[i++] = 0x48; /* add $0x8,%rsp */
1169 buf[i++] = 0x83;
1170 buf[i++] = 0xc4;
1171 buf[i++] = 0x08;
1172 buf[i++] = 0x9d; /* popfq */
1173 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1174 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1175 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1176 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1177 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1178 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1179 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1180 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1181 buf[i++] = 0x58; /* pop %rax */
1182 buf[i++] = 0x5b; /* pop %rbx */
1183 buf[i++] = 0x59; /* pop %rcx */
1184 buf[i++] = 0x5a; /* pop %rdx */
1185 buf[i++] = 0x5e; /* pop %rsi */
1186 buf[i++] = 0x5f; /* pop %rdi */
1187 buf[i++] = 0x5d; /* pop %rbp */
1188 buf[i++] = 0x5c; /* pop %rsp */
1189 append_insns (&buildaddr, i, buf);
1190
1191 /* Now, adjust the original instruction to execute in the jump
1192 pad. */
1193 *adjusted_insn_addr = buildaddr;
1194 relocate_instruction (&buildaddr, tpaddr);
1195 *adjusted_insn_addr_end = buildaddr;
1196
1197 /* Finally, write a jump back to the program. */
1198
1199 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1200 if (loffset > INT_MAX || loffset < INT_MIN)
1201 {
1202 sprintf (err,
1203 "E.Jump back from jump pad too far from tracepoint "
1204 "(offset 0x%" PRIx64 " > int32).", loffset);
1205 return 1;
1206 }
1207
1208 offset = (int) loffset;
1209 memcpy (buf, jump_insn, sizeof (jump_insn));
1210 memcpy (buf + 1, &offset, 4);
1211 append_insns (&buildaddr, sizeof (jump_insn), buf);
1212
1213 /* The jump pad is now built. Wire in a jump to our jump pad. This
1214 is always done last (by our caller actually), so that we can
1215 install fast tracepoints with threads running. This relies on
1216 the agent's atomic write support. */
1217 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1218 if (loffset > INT_MAX || loffset < INT_MIN)
1219 {
1220 sprintf (err,
1221 "E.Jump pad too far from tracepoint "
1222 "(offset 0x%" PRIx64 " > int32).", loffset);
1223 return 1;
1224 }
1225
1226 offset = (int) loffset;
1227
1228 memcpy (buf, jump_insn, sizeof (jump_insn));
1229 memcpy (buf + 1, &offset, 4);
1230 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1231 *jjump_pad_insn_size = sizeof (jump_insn);
1232
1233 /* Return the end address of our pad. */
1234 *jump_entry = buildaddr;
1235
1236 return 0;
1237 }
1238
1239 #endif /* __x86_64__ */
1240
1241 /* Build a jump pad that saves registers and calls a collection
1242    function.  Writes to JJUMPAD_INSN the jump instruction that leads
1243    to the jump pad; the caller is responsible for writing it in at
1244    the tracepoint address. */
1245
1246 static int
1247 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1248 CORE_ADDR collector,
1249 CORE_ADDR lockaddr,
1250 ULONGEST orig_size,
1251 CORE_ADDR *jump_entry,
1252 CORE_ADDR *trampoline,
1253 ULONGEST *trampoline_size,
1254 unsigned char *jjump_pad_insn,
1255 ULONGEST *jjump_pad_insn_size,
1256 CORE_ADDR *adjusted_insn_addr,
1257 CORE_ADDR *adjusted_insn_addr_end,
1258 char *err)
1259 {
1260 unsigned char buf[0x100];
1261 int i, offset;
1262 CORE_ADDR buildaddr = *jump_entry;
1263
1264 /* Build the jump pad. */
1265
1266 /* First, do tracepoint data collection. Save registers. */
1267 i = 0;
1268 buf[i++] = 0x60; /* pushad */
1269 buf[i++] = 0x68; /* push tpaddr aka $pc */
1270 *((int *)(buf + i)) = (int) tpaddr;
1271 i += 4;
1272 buf[i++] = 0x9c; /* pushf */
1273 buf[i++] = 0x1e; /* push %ds */
1274 buf[i++] = 0x06; /* push %es */
1275 buf[i++] = 0x0f; /* push %fs */
1276 buf[i++] = 0xa0;
1277 buf[i++] = 0x0f; /* push %gs */
1278 buf[i++] = 0xa8;
1279 buf[i++] = 0x16; /* push %ss */
1280 buf[i++] = 0x0e; /* push %cs */
1281 append_insns (&buildaddr, i, buf);
1282
1283 /* Stack space for the collecting_t object. */
1284 i = 0;
1285 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1286
1287 /* Build the object. */
1288 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1289 memcpy (buf + i, &tpoint, 4);
1290 i += 4;
1291 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1292
1293 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1294 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1295 append_insns (&buildaddr, i, buf);
1296
1297   /* Spin-lock.  Note this uses cmpxchg, which the original i386 lacks.
1298      If we cared about that, xchg could be used instead. */
1299
1300 i = 0;
1301 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1302 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1303 %esp,<lockaddr> */
1304 memcpy (&buf[i], (void *) &lockaddr, 4);
1305 i += 4;
1306 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1307 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1308 append_insns (&buildaddr, i, buf);
1309
1310
1311 /* Set up arguments to the gdb_collect call. */
1312 i = 0;
1313 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1314 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1315 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1316 append_insns (&buildaddr, i, buf);
1317
1318 i = 0;
1319 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1320 append_insns (&buildaddr, i, buf);
1321
1322 i = 0;
1323 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1324 memcpy (&buf[i], (void *) &tpoint, 4);
1325 i += 4;
1326 append_insns (&buildaddr, i, buf);
1327
1328 buf[0] = 0xe8; /* call <reladdr> */
1329 offset = collector - (buildaddr + sizeof (jump_insn));
1330 memcpy (buf + 1, &offset, 4);
1331 append_insns (&buildaddr, 5, buf);
1332 /* Clean up after the call. */
1333 buf[0] = 0x83; /* add $0x8,%esp */
1334 buf[1] = 0xc4;
1335 buf[2] = 0x08;
1336 append_insns (&buildaddr, 3, buf);
1337
1338
1339 /* Clear the spin-lock. This would need the LOCK prefix on older
1340 broken archs. */
1341 i = 0;
1342 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1343 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1344 memcpy (buf + i, &lockaddr, 4);
1345 i += 4;
1346 append_insns (&buildaddr, i, buf);
1347
1348
1349 /* Remove stack that had been used for the collect_t object. */
1350 i = 0;
1351 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1352 append_insns (&buildaddr, i, buf);
1353
1354 i = 0;
1355 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1356 buf[i++] = 0xc4;
1357 buf[i++] = 0x04;
1358 buf[i++] = 0x17; /* pop %ss */
1359 buf[i++] = 0x0f; /* pop %gs */
1360 buf[i++] = 0xa9;
1361 buf[i++] = 0x0f; /* pop %fs */
1362 buf[i++] = 0xa1;
1363 buf[i++] = 0x07; /* pop %es */
1364 buf[i++] = 0x1f; /* pop %ds */
1365 buf[i++] = 0x9d; /* popf */
1366 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1367 buf[i++] = 0xc4;
1368 buf[i++] = 0x04;
1369 buf[i++] = 0x61; /* popad */
1370 append_insns (&buildaddr, i, buf);
1371
1372 /* Now, adjust the original instruction to execute in the jump
1373 pad. */
1374 *adjusted_insn_addr = buildaddr;
1375 relocate_instruction (&buildaddr, tpaddr);
1376 *adjusted_insn_addr_end = buildaddr;
1377
1378 /* Write the jump back to the program. */
1379 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1380 memcpy (buf, jump_insn, sizeof (jump_insn));
1381 memcpy (buf + 1, &offset, 4);
1382 append_insns (&buildaddr, sizeof (jump_insn), buf);
1383
1384 /* The jump pad is now built. Wire in a jump to our jump pad. This
1385 is always done last (by our caller actually), so that we can
1386 install fast tracepoints with threads running. This relies on
1387 the agent's atomic write support. */
1388 if (orig_size == 4)
1389 {
1390 /* Create a trampoline. */
1391 *trampoline_size = sizeof (jump_insn);
1392 if (!claim_trampoline_space (*trampoline_size, trampoline))
1393 {
1394 /* No trampoline space available. */
1395 strcpy (err,
1396 "E.Cannot allocate trampoline space needed for fast "
1397 "tracepoints on 4-byte instructions.");
1398 return 1;
1399 }
1400
1401 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1402 memcpy (buf, jump_insn, sizeof (jump_insn));
1403 memcpy (buf + 1, &offset, 4);
1404 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1405
1406 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1407 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1408 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1409 memcpy (buf + 2, &offset, 2);
1410 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1411 *jjump_pad_insn_size = sizeof (small_jump_insn);
1412 }
1413 else
1414 {
1415 /* Else use a 32-bit relative jump instruction. */
1416 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1417 memcpy (buf, jump_insn, sizeof (jump_insn));
1418 memcpy (buf + 1, &offset, 4);
1419 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1420 *jjump_pad_insn_size = sizeof (jump_insn);
1421 }
1422
1423 /* Return the end address of our pad. */
1424 *jump_entry = buildaddr;
1425
1426 return 0;
1427 }
1428
1429 static int
1430 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1431 CORE_ADDR collector,
1432 CORE_ADDR lockaddr,
1433 ULONGEST orig_size,
1434 CORE_ADDR *jump_entry,
1435 CORE_ADDR *trampoline,
1436 ULONGEST *trampoline_size,
1437 unsigned char *jjump_pad_insn,
1438 ULONGEST *jjump_pad_insn_size,
1439 CORE_ADDR *adjusted_insn_addr,
1440 CORE_ADDR *adjusted_insn_addr_end,
1441 char *err)
1442 {
1443 #ifdef __x86_64__
1444 if (is_64bit_tdesc ())
1445 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1446 collector, lockaddr,
1447 orig_size, jump_entry,
1448 trampoline, trampoline_size,
1449 jjump_pad_insn,
1450 jjump_pad_insn_size,
1451 adjusted_insn_addr,
1452 adjusted_insn_addr_end,
1453 err);
1454 #endif
1455
1456 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1457 collector, lockaddr,
1458 orig_size, jump_entry,
1459 trampoline, trampoline_size,
1460 jjump_pad_insn,
1461 jjump_pad_insn_size,
1462 adjusted_insn_addr,
1463 adjusted_insn_addr_end,
1464 err);
1465 }
1466
1467 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1468 architectures. */
1469
1470 static int
1471 x86_get_min_fast_tracepoint_insn_len (void)
1472 {
1473 static int warned_about_fast_tracepoints = 0;
1474
1475 #ifdef __x86_64__
1476 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1477 used for fast tracepoints. */
1478 if (is_64bit_tdesc ())
1479 return 5;
1480 #endif
1481
1482 if (agent_loaded_p ())
1483 {
1484 char errbuf[IPA_BUFSIZ];
1485
1486 errbuf[0] = '\0';
1487
1488 /* On x86, if trampolines are available, then 4-byte jump instructions
1489 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1490 with a 4-byte offset are used instead. */
1491 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1492 return 4;
1493 else
1494 {
1495 	  /* GDB has no channel to explain to the user why a shorter fast
1496 tracepoint is not possible, but at least make GDBserver
1497 mention that something has gone awry. */
1498 if (!warned_about_fast_tracepoints)
1499 {
1500 warning ("4-byte fast tracepoints not available; %s", errbuf);
1501 warned_about_fast_tracepoints = 1;
1502 }
1503 return 5;
1504 }
1505 }
1506 else
1507 {
1508 /* Indicate that the minimum length is currently unknown since the IPA
1509 has not loaded yet. */
1510 return 0;
1511 }
1512 }
1513
1514 static void
1515 add_insns (unsigned char *start, int len)
1516 {
1517 CORE_ADDR buildaddr = current_insn_ptr;
1518
1519 if (debug_threads)
1520 debug_printf ("Adding %d bytes of insn at %s\n",
1521 len, paddress (buildaddr));
1522
1523 append_insns (&buildaddr, len, start);
1524 current_insn_ptr = buildaddr;
1525 }
1526
1527 /* Our general strategy for emitting code is to avoid specifying raw
1528 bytes whenever possible, and instead copy a block of inline asm
1529 that is embedded in the function. This is a little messy, because
1530 we need to keep the compiler from discarding what looks like dead
1531 code, plus suppress various warnings. */
1532
1533 #define EMIT_ASM(NAME, INSNS) \
1534 do \
1535 { \
1536 extern unsigned char start_ ## NAME, end_ ## NAME; \
1537 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1538 __asm__ ("jmp end_" #NAME "\n" \
1539 "\t" "start_" #NAME ":" \
1540 "\t" INSNS "\n" \
1541 "\t" "end_" #NAME ":"); \
1542 } while (0)
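/* For illustration (a description of the macro above, not new
   functionality): a call such as

     EMIT_ASM (amd64_pop, "pop %rax");

   assembles the instruction template in line between the
   start_amd64_pop and end_amd64_pop labels, and at run time add_insns
   copies those bytes into the bytecode buffer at current_insn_ptr.
   The leading jmp ensures gdbserver itself never executes the
   template.  */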
1543
1544 #ifdef __x86_64__
1545
1546 #define EMIT_ASM32(NAME,INSNS) \
1547 do \
1548 { \
1549 extern unsigned char start_ ## NAME, end_ ## NAME; \
1550 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1551 __asm__ (".code32\n" \
1552 "\t" "jmp end_" #NAME "\n" \
1553 "\t" "start_" #NAME ":\n" \
1554 "\t" INSNS "\n" \
1555 "\t" "end_" #NAME ":\n" \
1556 ".code64\n"); \
1557 } while (0)
1558
1559 #else
1560
1561 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1562
1563 #endif
1564
1565 #ifdef __x86_64__
1566
1567 static void
1568 amd64_emit_prologue (void)
1569 {
1570 EMIT_ASM (amd64_prologue,
1571 "pushq %rbp\n\t"
1572 "movq %rsp,%rbp\n\t"
1573 "sub $0x20,%rsp\n\t"
1574 "movq %rdi,-8(%rbp)\n\t"
1575 "movq %rsi,-16(%rbp)");
1576 }
1577
1578
1579 static void
1580 amd64_emit_epilogue (void)
1581 {
1582 EMIT_ASM (amd64_epilogue,
1583 "movq -16(%rbp),%rdi\n\t"
1584 "movq %rax,(%rdi)\n\t"
1585 "xor %rax,%rax\n\t"
1586 "leave\n\t"
1587 "ret");
1588 }
1589
1590 static void
1591 amd64_emit_add (void)
1592 {
1593 EMIT_ASM (amd64_add,
1594 "add (%rsp),%rax\n\t"
1595 "lea 0x8(%rsp),%rsp");
1596 }
1597
1598 static void
1599 amd64_emit_sub (void)
1600 {
1601 EMIT_ASM (amd64_sub,
1602 "sub %rax,(%rsp)\n\t"
1603 "pop %rax");
1604 }
1605
1606 static void
1607 amd64_emit_mul (void)
1608 {
1609 emit_error = 1;
1610 }
1611
1612 static void
1613 amd64_emit_lsh (void)
1614 {
1615 emit_error = 1;
1616 }
1617
1618 static void
1619 amd64_emit_rsh_signed (void)
1620 {
1621 emit_error = 1;
1622 }
1623
1624 static void
1625 amd64_emit_rsh_unsigned (void)
1626 {
1627 emit_error = 1;
1628 }
1629
1630 static void
1631 amd64_emit_ext (int arg)
1632 {
1633 switch (arg)
1634 {
1635 case 8:
1636 EMIT_ASM (amd64_ext_8,
1637 "cbtw\n\t"
1638 "cwtl\n\t"
1639 "cltq");
1640 break;
1641 case 16:
1642 EMIT_ASM (amd64_ext_16,
1643 "cwtl\n\t"
1644 "cltq");
1645 break;
1646 case 32:
1647 EMIT_ASM (amd64_ext_32,
1648 "cltq");
1649 break;
1650 default:
1651 emit_error = 1;
1652 }
1653 }
1654
1655 static void
1656 amd64_emit_log_not (void)
1657 {
1658 EMIT_ASM (amd64_log_not,
1659 "test %rax,%rax\n\t"
1660 "sete %cl\n\t"
1661 "movzbq %cl,%rax");
1662 }
1663
1664 static void
1665 amd64_emit_bit_and (void)
1666 {
1667 EMIT_ASM (amd64_and,
1668 "and (%rsp),%rax\n\t"
1669 "lea 0x8(%rsp),%rsp");
1670 }
1671
1672 static void
1673 amd64_emit_bit_or (void)
1674 {
1675 EMIT_ASM (amd64_or,
1676 "or (%rsp),%rax\n\t"
1677 "lea 0x8(%rsp),%rsp");
1678 }
1679
1680 static void
1681 amd64_emit_bit_xor (void)
1682 {
1683 EMIT_ASM (amd64_xor,
1684 "xor (%rsp),%rax\n\t"
1685 "lea 0x8(%rsp),%rsp");
1686 }
1687
1688 static void
1689 amd64_emit_bit_not (void)
1690 {
1691 EMIT_ASM (amd64_bit_not,
1692 "xorq $0xffffffffffffffff,%rax");
1693 }
1694
1695 static void
1696 amd64_emit_equal (void)
1697 {
1698 EMIT_ASM (amd64_equal,
1699 "cmp %rax,(%rsp)\n\t"
1700 "je .Lamd64_equal_true\n\t"
1701 "xor %rax,%rax\n\t"
1702 "jmp .Lamd64_equal_end\n\t"
1703 ".Lamd64_equal_true:\n\t"
1704 "mov $0x1,%rax\n\t"
1705 ".Lamd64_equal_end:\n\t"
1706 "lea 0x8(%rsp),%rsp");
1707 }
1708
1709 static void
1710 amd64_emit_less_signed (void)
1711 {
1712 EMIT_ASM (amd64_less_signed,
1713 "cmp %rax,(%rsp)\n\t"
1714 "jl .Lamd64_less_signed_true\n\t"
1715 "xor %rax,%rax\n\t"
1716 "jmp .Lamd64_less_signed_end\n\t"
1717 ".Lamd64_less_signed_true:\n\t"
1718 "mov $1,%rax\n\t"
1719 ".Lamd64_less_signed_end:\n\t"
1720 "lea 0x8(%rsp),%rsp");
1721 }
1722
1723 static void
1724 amd64_emit_less_unsigned (void)
1725 {
1726 EMIT_ASM (amd64_less_unsigned,
1727 "cmp %rax,(%rsp)\n\t"
1728 "jb .Lamd64_less_unsigned_true\n\t"
1729 "xor %rax,%rax\n\t"
1730 "jmp .Lamd64_less_unsigned_end\n\t"
1731 ".Lamd64_less_unsigned_true:\n\t"
1732 "mov $1,%rax\n\t"
1733 ".Lamd64_less_unsigned_end:\n\t"
1734 "lea 0x8(%rsp),%rsp");
1735 }
1736
1737 static void
1738 amd64_emit_ref (int size)
1739 {
1740 switch (size)
1741 {
1742 case 1:
1743 EMIT_ASM (amd64_ref1,
1744 "movb (%rax),%al");
1745 break;
1746 case 2:
1747 EMIT_ASM (amd64_ref2,
1748 "movw (%rax),%ax");
1749 break;
1750 case 4:
1751 EMIT_ASM (amd64_ref4,
1752 "movl (%rax),%eax");
1753 break;
1754 case 8:
1755 EMIT_ASM (amd64_ref8,
1756 "movq (%rax),%rax");
1757 break;
1758 }
1759 }
1760
1761 static void
1762 amd64_emit_if_goto (int *offset_p, int *size_p)
1763 {
1764 EMIT_ASM (amd64_if_goto,
1765 "mov %rax,%rcx\n\t"
1766 "pop %rax\n\t"
1767 "cmp $0,%rcx\n\t"
1768 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1769 if (offset_p)
1770 *offset_p = 10;
1771 if (size_p)
1772 *size_p = 4;
1773 }
1774
1775 static void
1776 amd64_emit_goto (int *offset_p, int *size_p)
1777 {
1778 EMIT_ASM (amd64_goto,
1779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1780 if (offset_p)
1781 *offset_p = 1;
1782 if (size_p)
1783 *size_p = 4;
1784 }
1785
1786 static void
1787 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1788 {
1789 int diff = (to - (from + size));
1790 unsigned char buf[sizeof (int)];
1791
1792 if (size != 4)
1793 {
1794 emit_error = 1;
1795 return;
1796 }
1797
1798 memcpy (buf, &diff, sizeof (int));
1799 target_write_memory (from, buf, sizeof (int));
1800 }
1801
1802 static void
1803 amd64_emit_const (LONGEST num)
1804 {
1805 unsigned char buf[16];
1806 int i;
1807 CORE_ADDR buildaddr = current_insn_ptr;
1808
1809 i = 0;
1810 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1811 memcpy (&buf[i], &num, sizeof (num));
1812 i += 8;
1813 append_insns (&buildaddr, i, buf);
1814 current_insn_ptr = buildaddr;
1815 }
1816
1817 static void
1818 amd64_emit_call (CORE_ADDR fn)
1819 {
1820 unsigned char buf[16];
1821 int i;
1822 CORE_ADDR buildaddr;
1823 LONGEST offset64;
1824
1825   /* The destination function, being in the shared library, may be more
1826      than 31 bits away from the compiled code pad. */
1827
1828 buildaddr = current_insn_ptr;
1829
1830 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1831
1832 i = 0;
1833
1834 if (offset64 > INT_MAX || offset64 < INT_MIN)
1835 {
1836       /* Offset is too large for a direct call; call through a register
1837 	 instead.  Use r10: it is call-clobbered, so we don't have to
1838 	 push/pop it. */
1839 buf[i++] = 0x48; /* mov $fn,%r10 */
1840 buf[i++] = 0xba;
1841 memcpy (buf + i, &fn, 8);
1842 i += 8;
1843 buf[i++] = 0xff; /* callq *%r10 */
1844 buf[i++] = 0xd2;
1845 }
1846 else
1847 {
1848 int offset32 = offset64; /* we know we can't overflow here. */
1849
1850 buf[i++] = 0xe8; /* call <reladdr> */
1851 memcpy (buf + i, &offset32, 4);
1852 i += 4;
1853 }
1854
1855 append_insns (&buildaddr, i, buf);
1856 current_insn_ptr = buildaddr;
1857 }
1858
1859 static void
1860 amd64_emit_reg (int reg)
1861 {
1862 unsigned char buf[16];
1863 int i;
1864 CORE_ADDR buildaddr;
1865
1866 /* Assume raw_regs is still in %rdi. */
1867 buildaddr = current_insn_ptr;
1868 i = 0;
1869 buf[i++] = 0xbe; /* mov $<n>,%esi */
1870 memcpy (&buf[i], &reg, sizeof (reg));
1871 i += 4;
1872 append_insns (&buildaddr, i, buf);
1873 current_insn_ptr = buildaddr;
1874 amd64_emit_call (get_raw_reg_func_addr ());
1875 }
1876
1877 static void
1878 amd64_emit_pop (void)
1879 {
1880 EMIT_ASM (amd64_pop,
1881 "pop %rax");
1882 }
1883
1884 static void
1885 amd64_emit_stack_flush (void)
1886 {
1887 EMIT_ASM (amd64_stack_flush,
1888 "push %rax");
1889 }
1890
1891 static void
1892 amd64_emit_zero_ext (int arg)
1893 {
1894 switch (arg)
1895 {
1896 case 8:
1897 EMIT_ASM (amd64_zero_ext_8,
1898 "and $0xff,%rax");
1899 break;
1900 case 16:
1901 EMIT_ASM (amd64_zero_ext_16,
1902 "and $0xffff,%rax");
1903 break;
1904 case 32:
1905 EMIT_ASM (amd64_zero_ext_32,
1906 "mov $0xffffffff,%rcx\n\t"
1907 "and %rcx,%rax");
1908 break;
1909 default:
1910 emit_error = 1;
1911 }
1912 }
1913
1914 static void
1915 amd64_emit_swap (void)
1916 {
1917 EMIT_ASM (amd64_swap,
1918 "mov %rax,%rcx\n\t"
1919 "pop %rax\n\t"
1920 "push %rcx");
1921 }
1922
1923 static void
1924 amd64_emit_stack_adjust (int n)
1925 {
1926 unsigned char buf[16];
1927 int i;
1928 CORE_ADDR buildaddr = current_insn_ptr;
1929
1930 i = 0;
1931 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1932 buf[i++] = 0x8d;
1933 buf[i++] = 0x64;
1934 buf[i++] = 0x24;
1935 /* This only handles adjustments up to 16, but we don't expect any more. */
1936 buf[i++] = n * 8;
1937 append_insns (&buildaddr, i, buf);
1938 current_insn_ptr = buildaddr;
1939 }
1940
1941 /* FN's prototype is `LONGEST(*fn)(int)'. */
1942
1943 static void
1944 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1945 {
1946 unsigned char buf[16];
1947 int i;
1948 CORE_ADDR buildaddr;
1949
1950 buildaddr = current_insn_ptr;
1951 i = 0;
1952 buf[i++] = 0xbf; /* movl $<n>,%edi */
1953 memcpy (&buf[i], &arg1, sizeof (arg1));
1954 i += 4;
1955 append_insns (&buildaddr, i, buf);
1956 current_insn_ptr = buildaddr;
1957 amd64_emit_call (fn);
1958 }
1959
1960 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1961
1962 static void
1963 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1964 {
1965 unsigned char buf[16];
1966 int i;
1967 CORE_ADDR buildaddr;
1968
1969 buildaddr = current_insn_ptr;
1970 i = 0;
1971 buf[i++] = 0xbf; /* movl $<n>,%edi */
1972 memcpy (&buf[i], &arg1, sizeof (arg1));
1973 i += 4;
1974 append_insns (&buildaddr, i, buf);
1975 current_insn_ptr = buildaddr;
1976 EMIT_ASM (amd64_void_call_2_a,
1977 /* Save away a copy of the stack top. */
1978 "push %rax\n\t"
1979 /* Also pass top as the second argument. */
1980 "mov %rax,%rsi");
1981 amd64_emit_call (fn);
1982 EMIT_ASM (amd64_void_call_2_b,
1983 /* Restore the stack top, %rax may have been trashed. */
1984 "pop %rax");
1985 }
1986
1987 static void
1988 amd64_emit_eq_goto (int *offset_p, int *size_p)
1989 {
1990 EMIT_ASM (amd64_eq,
1991 "cmp %rax,(%rsp)\n\t"
1992 "jne .Lamd64_eq_fallthru\n\t"
1993 "lea 0x8(%rsp),%rsp\n\t"
1994 "pop %rax\n\t"
1995 /* jmp, but don't trust the assembler to choose the right jump */
1996 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1997 ".Lamd64_eq_fallthru:\n\t"
1998 "lea 0x8(%rsp),%rsp\n\t"
1999 "pop %rax");
2000
2001 if (offset_p)
2002 *offset_p = 13;
2003 if (size_p)
2004 *size_p = 4;
2005 }
2006
2007 static void
2008 amd64_emit_ne_goto (int *offset_p, int *size_p)
2009 {
2010 EMIT_ASM (amd64_ne,
2011 "cmp %rax,(%rsp)\n\t"
2012 "je .Lamd64_ne_fallthru\n\t"
2013 "lea 0x8(%rsp),%rsp\n\t"
2014 "pop %rax\n\t"
2015 /* jmp, but don't trust the assembler to choose the right jump */
2016 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2017 ".Lamd64_ne_fallthru:\n\t"
2018 "lea 0x8(%rsp),%rsp\n\t"
2019 "pop %rax");
2020
2021 if (offset_p)
2022 *offset_p = 13;
2023 if (size_p)
2024 *size_p = 4;
2025 }
2026
2027 static void
2028 amd64_emit_lt_goto (int *offset_p, int *size_p)
2029 {
2030 EMIT_ASM (amd64_lt,
2031 "cmp %rax,(%rsp)\n\t"
2032 "jnl .Lamd64_lt_fallthru\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2034 "pop %rax\n\t"
2035 /* jmp, but don't trust the assembler to choose the right jump */
2036 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2037 ".Lamd64_lt_fallthru:\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2039 "pop %rax");
2040
2041 if (offset_p)
2042 *offset_p = 13;
2043 if (size_p)
2044 *size_p = 4;
2045 }
2046
2047 static void
2048 amd64_emit_le_goto (int *offset_p, int *size_p)
2049 {
2050 EMIT_ASM (amd64_le,
2051 "cmp %rax,(%rsp)\n\t"
2052 "jnle .Lamd64_le_fallthru\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2054 "pop %rax\n\t"
2055 /* jmp, but don't trust the assembler to choose the right jump */
2056 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2057 ".Lamd64_le_fallthru:\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2059 "pop %rax");
2060
2061 if (offset_p)
2062 *offset_p = 13;
2063 if (size_p)
2064 *size_p = 4;
2065 }
2066
2067 static void
2068 amd64_emit_gt_goto (int *offset_p, int *size_p)
2069 {
2070 EMIT_ASM (amd64_gt,
2071 "cmp %rax,(%rsp)\n\t"
2072 "jng .Lamd64_gt_fallthru\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2074 "pop %rax\n\t"
2075 /* jmp, but don't trust the assembler to choose the right jump */
2076 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2077 ".Lamd64_gt_fallthru:\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax");
2080
2081 if (offset_p)
2082 *offset_p = 13;
2083 if (size_p)
2084 *size_p = 4;
2085 }
2086
2087 static void
2088 amd64_emit_ge_goto (int *offset_p, int *size_p)
2089 {
2090 EMIT_ASM (amd64_ge,
2091 "cmp %rax,(%rsp)\n\t"
2092 "jnge .Lamd64_ge_fallthru\n\t"
2093 ".Lamd64_ge_jump:\n\t"
2094 "lea 0x8(%rsp),%rsp\n\t"
2095 "pop %rax\n\t"
2096 /* jmp, but don't trust the assembler to choose the right jump */
2097 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2098 ".Lamd64_ge_fallthru:\n\t"
2099 "lea 0x8(%rsp),%rsp\n\t"
2100 "pop %rax");
2101
2102 if (offset_p)
2103 *offset_p = 13;
2104 if (size_p)
2105 *size_p = 4;
2106 }
2107
2108 struct emit_ops amd64_emit_ops =
2109 {
2110 amd64_emit_prologue,
2111 amd64_emit_epilogue,
2112 amd64_emit_add,
2113 amd64_emit_sub,
2114 amd64_emit_mul,
2115 amd64_emit_lsh,
2116 amd64_emit_rsh_signed,
2117 amd64_emit_rsh_unsigned,
2118 amd64_emit_ext,
2119 amd64_emit_log_not,
2120 amd64_emit_bit_and,
2121 amd64_emit_bit_or,
2122 amd64_emit_bit_xor,
2123 amd64_emit_bit_not,
2124 amd64_emit_equal,
2125 amd64_emit_less_signed,
2126 amd64_emit_less_unsigned,
2127 amd64_emit_ref,
2128 amd64_emit_if_goto,
2129 amd64_emit_goto,
2130 amd64_write_goto_address,
2131 amd64_emit_const,
2132 amd64_emit_call,
2133 amd64_emit_reg,
2134 amd64_emit_pop,
2135 amd64_emit_stack_flush,
2136 amd64_emit_zero_ext,
2137 amd64_emit_swap,
2138 amd64_emit_stack_adjust,
2139 amd64_emit_int_call_1,
2140 amd64_emit_void_call_2,
2141 amd64_emit_eq_goto,
2142 amd64_emit_ne_goto,
2143 amd64_emit_lt_goto,
2144 amd64_emit_le_goto,
2145 amd64_emit_gt_goto,
2146 amd64_emit_ge_goto
2147 };
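/* Note that the initializer above is positional, so the order of the
   entries must match the field order of struct emit_ops (presumably
   declared in tracepoint.h, which this file includes).  These hooks are
   presumably driven by gdbserver's agent-expression bytecode compiler,
   which calls one emitter per bytecode operation while laying out
   native code at current_insn_ptr.  */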
2148
2149 #endif /* __x86_64__ */
2150
2151 static void
2152 i386_emit_prologue (void)
2153 {
2154 EMIT_ASM32 (i386_prologue,
2155 "push %ebp\n\t"
2156 "mov %esp,%ebp\n\t"
2157 "push %ebx");
2158 /* At this point, the raw regs base address is at 8(%ebp), and the
2159 value pointer is at 12(%ebp). */
2160 }
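/* In other words, the generated code appears to be entered like an
   ordinary cdecl function of two arguments: 4(%ebp) holds the return
   address, 8(%ebp) the raw regs base address and 12(%ebp) the address
   where the final value is stored.  Throughout the emitters below the
   64-bit top-of-stack value is kept split across %eax (low word) and
   %ebx (high word), which is why i386_emit_epilogue writes %eax to
   (%ecx) and %ebx to 4(%ecx).  */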
2161
2162 static void
2163 i386_emit_epilogue (void)
2164 {
2165 EMIT_ASM32 (i386_epilogue,
2166 "mov 12(%ebp),%ecx\n\t"
2167 "mov %eax,(%ecx)\n\t"
2168 "mov %ebx,0x4(%ecx)\n\t"
2169 "xor %eax,%eax\n\t"
2170 "pop %ebx\n\t"
2171 "pop %ebp\n\t"
2172 "ret");
2173 }
2174
2175 static void
2176 i386_emit_add (void)
2177 {
2178 EMIT_ASM32 (i386_add,
2179 "add (%esp),%eax\n\t"
2180 "adc 0x4(%esp),%ebx\n\t"
2181 "lea 0x8(%esp),%esp");
2182 }
2183
2184 static void
2185 i386_emit_sub (void)
2186 {
2187 EMIT_ASM32 (i386_sub,
2188 "subl %eax,(%esp)\n\t"
2189 "sbbl %ebx,4(%esp)\n\t"
2190 "pop %eax\n\t"
2191 "pop %ebx\n\t");
2192 }
2193
2194 static void
2195 i386_emit_mul (void)
2196 {
2197 emit_error = 1;
2198 }
2199
2200 static void
2201 i386_emit_lsh (void)
2202 {
2203 emit_error = 1;
2204 }
2205
2206 static void
2207 i386_emit_rsh_signed (void)
2208 {
2209 emit_error = 1;
2210 }
2211
2212 static void
2213 i386_emit_rsh_unsigned (void)
2214 {
2215 emit_error = 1;
2216 }
2217
2218 static void
2219 i386_emit_ext (int arg)
2220 {
2221 switch (arg)
2222 {
2223 case 8:
2224 EMIT_ASM32 (i386_ext_8,
2225 "cbtw\n\t"
2226 "cwtl\n\t"
2227 "movl %eax,%ebx\n\t"
2228 "sarl $31,%ebx");
2229 break;
2230 case 16:
2231 EMIT_ASM32 (i386_ext_16,
2232 "cwtl\n\t"
2233 "movl %eax,%ebx\n\t"
2234 "sarl $31,%ebx");
2235 break;
2236 case 32:
2237 EMIT_ASM32 (i386_ext_32,
2238 "movl %eax,%ebx\n\t"
2239 "sarl $31,%ebx");
2240 break;
2241 default:
2242 emit_error = 1;
2243 }
2244 }
2245
2246 static void
2247 i386_emit_log_not (void)
2248 {
2249 EMIT_ASM32 (i386_log_not,
2250 "or %ebx,%eax\n\t"
2251 "test %eax,%eax\n\t"
2252 "sete %cl\n\t"
2253 "xor %ebx,%ebx\n\t"
2254 "movzbl %cl,%eax");
2255 }
2256
2257 static void
2258 i386_emit_bit_and (void)
2259 {
2260 EMIT_ASM32 (i386_and,
2261 "and (%esp),%eax\n\t"
2262 "and 0x4(%esp),%ebx\n\t"
2263 "lea 0x8(%esp),%esp");
2264 }
2265
2266 static void
2267 i386_emit_bit_or (void)
2268 {
2269 EMIT_ASM32 (i386_or,
2270 "or (%esp),%eax\n\t"
2271 "or 0x4(%esp),%ebx\n\t"
2272 "lea 0x8(%esp),%esp");
2273 }
2274
2275 static void
2276 i386_emit_bit_xor (void)
2277 {
2278 EMIT_ASM32 (i386_xor,
2279 "xor (%esp),%eax\n\t"
2280 "xor 0x4(%esp),%ebx\n\t"
2281 "lea 0x8(%esp),%esp");
2282 }
2283
2284 static void
2285 i386_emit_bit_not (void)
2286 {
2287 EMIT_ASM32 (i386_bit_not,
2288 "xor $0xffffffff,%eax\n\t"
2289 "xor $0xffffffff,%ebx\n\t");
2290 }
2291
2292 static void
2293 i386_emit_equal (void)
2294 {
2295 EMIT_ASM32 (i386_equal,
2296 "cmpl %ebx,4(%esp)\n\t"
2297 "jne .Li386_equal_false\n\t"
2298 "cmpl %eax,(%esp)\n\t"
2299 "je .Li386_equal_true\n\t"
2300 ".Li386_equal_false:\n\t"
2301 "xor %eax,%eax\n\t"
2302 "jmp .Li386_equal_end\n\t"
2303 ".Li386_equal_true:\n\t"
2304 "mov $1,%eax\n\t"
2305 ".Li386_equal_end:\n\t"
2306 "xor %ebx,%ebx\n\t"
2307 "lea 0x8(%esp),%esp");
2308 }
2309
2310 static void
2311 i386_emit_less_signed (void)
2312 {
2313 EMIT_ASM32 (i386_less_signed,
2314 "cmpl %ebx,4(%esp)\n\t"
2315 "jl .Li386_less_signed_true\n\t"
2316 "jne .Li386_less_signed_false\n\t"
2317 "cmpl %eax,(%esp)\n\t"
2318 "jl .Li386_less_signed_true\n\t"
2319 ".Li386_less_signed_false:\n\t"
2320 "xor %eax,%eax\n\t"
2321 "jmp .Li386_less_signed_end\n\t"
2322 ".Li386_less_signed_true:\n\t"
2323 "mov $1,%eax\n\t"
2324 ".Li386_less_signed_end:\n\t"
2325 "xor %ebx,%ebx\n\t"
2326 "lea 0x8(%esp),%esp");
2327 }
2328
2329 static void
2330 i386_emit_less_unsigned (void)
2331 {
2332 EMIT_ASM32 (i386_less_unsigned,
2333 "cmpl %ebx,4(%esp)\n\t"
2334 "jb .Li386_less_unsigned_true\n\t"
2335 "jne .Li386_less_unsigned_false\n\t"
2336 "cmpl %eax,(%esp)\n\t"
2337 "jb .Li386_less_unsigned_true\n\t"
2338 ".Li386_less_unsigned_false:\n\t"
2339 "xor %eax,%eax\n\t"
2340 "jmp .Li386_less_unsigned_end\n\t"
2341 ".Li386_less_unsigned_true:\n\t"
2342 "mov $1,%eax\n\t"
2343 ".Li386_less_unsigned_end:\n\t"
2344 "xor %ebx,%ebx\n\t"
2345 "lea 0x8(%esp),%esp");
2346 }
2347
2348 static void
2349 i386_emit_ref (int size)
2350 {
2351 switch (size)
2352 {
2353 case 1:
2354 EMIT_ASM32 (i386_ref1,
2355 "movb (%eax),%al");
2356 break;
2357 case 2:
2358 EMIT_ASM32 (i386_ref2,
2359 "movw (%eax),%ax");
2360 break;
2361 case 4:
2362 EMIT_ASM32 (i386_ref4,
2363 "movl (%eax),%eax");
2364 break;
2365 case 8:
2366 EMIT_ASM32 (i386_ref8,
2367 "movl 4(%eax),%ebx\n\t"
2368 "movl (%eax),%eax");
2369 break;
2370 }
2371 }
2372
2373 static void
2374 i386_emit_if_goto (int *offset_p, int *size_p)
2375 {
2376 EMIT_ASM32 (i386_if_goto,
2377 "mov %eax,%ecx\n\t"
2378 "or %ebx,%ecx\n\t"
2379 "pop %eax\n\t"
2380 "pop %ebx\n\t"
2381 "cmpl $0,%ecx\n\t"
2382 /* Don't trust the assembler to choose the right jump */
2383 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2384
2385 if (offset_p)
2386 *offset_p = 11; /* be sure that this matches the sequence above */
2387 if (size_p)
2388 *size_p = 4;
2389 }
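/* Sketch of where the 11 comes from, assuming the usual encodings:

       89 c1            mov %eax,%ecx              2 bytes
       09 d9            or %ebx,%ecx               2 bytes
       58               pop %eax                   1 byte
       5b               pop %ebx                   1 byte
       83 f9 00         cmpl $0,%ecx               3 bytes
       0f 85            jne rel32 opcode           2 bytes
                                                  -- 11 bytes

   so the 4-byte displacement of the jne starts at offset 11 and is
   presumably patched later via i386_write_goto_address.  */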
2390
2391 static void
2392 i386_emit_goto (int *offset_p, int *size_p)
2393 {
2394 EMIT_ASM32 (i386_goto,
2395 /* Don't trust the assembler to choose the right jump */
2396 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2397 if (offset_p)
2398 *offset_p = 1;
2399 if (size_p)
2400 *size_p = 4;
2401 }
2402
2403 static void
2404 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2405 {
2406 int diff = (to - (from + size));
2407 unsigned char buf[sizeof (int)];
2408
2409 /* We're only doing 4-byte sizes at the moment. */
2410 if (size != 4)
2411 {
2412 emit_error = 1;
2413 return;
2414 }
2415
2416 memcpy (buf, &diff, sizeof (int));
2417 target_write_memory (from, buf, sizeof (int));
2418 }
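/* A worked example with made-up addresses: if the 4-byte displacement
   lives at from = 0x1000 and the branch target is to = 0x1040, then
   diff = 0x1040 - (0x1000 + 4) = 0x3c, stored little-endian as
   3c 00 00 00.  The displacement is thus relative to the first byte
   after the displacement field, which is exactly what the rel32 forms
   emitted above expect.  */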
2419
2420 static void
2421 i386_emit_const (LONGEST num)
2422 {
2423 unsigned char buf[16];
2424 int i, hi, lo;
2425 CORE_ADDR buildaddr = current_insn_ptr;
2426
2427 i = 0;
2428 buf[i++] = 0xb8; /* mov $<n>,%eax */
2429 lo = num & 0xffffffff;
2430 memcpy (&buf[i], &lo, sizeof (lo));
2431 i += 4;
2432 hi = ((num >> 32) & 0xffffffff);
2433 if (hi)
2434 {
2435 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2436 memcpy (&buf[i], &hi, sizeof (hi));
2437 i += 4;
2438 }
2439 else
2440 {
2441 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2442 }
2443 append_insns (&buildaddr, i, buf);
2444 current_insn_ptr = buildaddr;
2445 }
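/* For example (illustrative values): i386_emit_const (5) emits
   b8 05 00 00 00 / 31 db (mov $5,%eax; xor %ebx,%ebx), while
   i386_emit_const (0x100000002) emits b8 02 00 00 00 / bb 01 00 00 00
   (mov $2,%eax; mov $1,%ebx), keeping the 64-bit constant split across
   the %eax/%ebx pair described above.  */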
2446
2447 static void
2448 i386_emit_call (CORE_ADDR fn)
2449 {
2450 unsigned char buf[16];
2451 int i, offset;
2452 CORE_ADDR buildaddr;
2453
2454 buildaddr = current_insn_ptr;
2455 i = 0;
2456 buf[i++] = 0xe8; /* call <reladdr> */
2457 offset = ((int) fn) - (buildaddr + 5);
2458 memcpy (buf + 1, &offset, 4);
2459 append_insns (&buildaddr, 5, buf);
2460 current_insn_ptr = buildaddr;
2461 }
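/* The call is emitted as e8 <rel32>, with the displacement measured
   from the end of the 5-byte instruction.  Illustrative numbers: with
   buildaddr = 0x2000 and fn = 0x2100, offset = 0x2100 - 0x2005 = 0xfb,
   so the emitted bytes are e8 fb 00 00 00.  */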
2462
2463 static void
2464 i386_emit_reg (int reg)
2465 {
2466 unsigned char buf[16];
2467 int i;
2468 CORE_ADDR buildaddr;
2469
2470 EMIT_ASM32 (i386_reg_a,
2471 "sub $0x8,%esp");
2472 buildaddr = current_insn_ptr;
2473 i = 0;
2474 buf[i++] = 0xb8; /* mov $<n>,%eax */
2475 memcpy (&buf[i], &reg, sizeof (reg));
2476 i += 4;
2477 append_insns (&buildaddr, i, buf);
2478 current_insn_ptr = buildaddr;
2479 EMIT_ASM32 (i386_reg_b,
2480 "mov %eax,4(%esp)\n\t"
2481 "mov 8(%ebp),%eax\n\t"
2482 "mov %eax,(%esp)");
2483 i386_emit_call (get_raw_reg_func_addr ());
2484 EMIT_ASM32 (i386_reg_c,
2485 "xor %ebx,%ebx\n\t"
2486 "lea 0x8(%esp),%esp");
2487 }
2488
2489 static void
2490 i386_emit_pop (void)
2491 {
2492 EMIT_ASM32 (i386_pop,
2493 "pop %eax\n\t"
2494 "pop %ebx");
2495 }
2496
2497 static void
2498 i386_emit_stack_flush (void)
2499 {
2500 EMIT_ASM32 (i386_stack_flush,
2501 "push %ebx\n\t"
2502 "push %eax");
2503 }
2504
2505 static void
2506 i386_emit_zero_ext (int arg)
2507 {
2508 switch (arg)
2509 {
2510 case 8:
2511 EMIT_ASM32 (i386_zero_ext_8,
2512 "and $0xff,%eax\n\t"
2513 "xor %ebx,%ebx");
2514 break;
2515 case 16:
2516 EMIT_ASM32 (i386_zero_ext_16,
2517 "and $0xffff,%eax\n\t"
2518 "xor %ebx,%ebx");
2519 break;
2520 case 32:
2521 EMIT_ASM32 (i386_zero_ext_32,
2522 "xor %ebx,%ebx");
2523 break;
2524 default:
2525 emit_error = 1;
2526 }
2527 }
2528
2529 static void
2530 i386_emit_swap (void)
2531 {
2532 EMIT_ASM32 (i386_swap,
2533 "mov %eax,%ecx\n\t"
2534 "mov %ebx,%edx\n\t"
2535 "pop %eax\n\t"
2536 "pop %ebx\n\t"
2537 "push %edx\n\t"
2538 "push %ecx");
2539 }
2540
2541 static void
2542 i386_emit_stack_adjust (int n)
2543 {
2544 unsigned char buf[16];
2545 int i;
2546 CORE_ADDR buildaddr = current_insn_ptr;
2547
2548 i = 0;
2549 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2550 buf[i++] = 0x64;
2551 buf[i++] = 0x24;
2552 buf[i++] = n * 8;
2553 append_insns (&buildaddr, i, buf);
2554 current_insn_ptr = buildaddr;
2555 }
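/* Each entry on the expression stack is a 64-bit value stored as two
   32-bit words, so adjusting by n entries means adding n * 8 to %esp.
   The bytes built above are the disp8 form of lea (8d 64 24 XX), so
   this presumably relies on n * 8 fitting in a signed byte.  */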
2556
2557 /* FN's prototype is `LONGEST(*fn)(int)'. */
2558
2559 static void
2560 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2561 {
2562 unsigned char buf[16];
2563 int i;
2564 CORE_ADDR buildaddr;
2565
2566 EMIT_ASM32 (i386_int_call_1_a,
2567 /* Reserve a bit of stack space. */
2568 "sub $0x8,%esp");
2569 /* Put the one argument on the stack. */
2570 buildaddr = current_insn_ptr;
2571 i = 0;
2572 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2573 buf[i++] = 0x04;
2574 buf[i++] = 0x24;
2575 memcpy (&buf[i], &arg1, sizeof (arg1));
2576 i += 4;
2577 append_insns (&buildaddr, i, buf);
2578 current_insn_ptr = buildaddr;
2579 i386_emit_call (fn);
2580 EMIT_ASM32 (i386_int_call_1_c,
2581 "mov %edx,%ebx\n\t"
2582 "lea 0x8(%esp),%esp");
2583 }
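/* Roughly: the sub $0x8,%esp reserves an argument slot, the movl
   $<arg1>,(%esp) stores the single int argument, and after the call
   the LONGEST return value comes back in %edx:%eax per the i386
   convention; the "mov %edx,%ebx" then turns it into the %eax/%ebx
   top-of-stack pair before the lea releases the argument slot.  */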
2584
2585 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2586
2587 static void
2588 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2589 {
2590 unsigned char buf[16];
2591 int i;
2592 CORE_ADDR buildaddr;
2593
2594 EMIT_ASM32 (i386_void_call_2_a,
2595 /* Preserve %eax only; we don't have to worry about %ebx. */
2596 "push %eax\n\t"
2597 /* Reserve a bit of stack space for arguments. */
2598 "sub $0x10,%esp\n\t"
2599 /* Copy "top" to the second argument position. (Note that
2600 we can't assume the function won't scribble on its
2601 arguments, so don't try to restore from this.) */
2602 "mov %eax,4(%esp)\n\t"
2603 "mov %ebx,8(%esp)");
2604 /* Put the first argument on the stack. */
2605 buildaddr = current_insn_ptr;
2606 i = 0;
2607 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2608 buf[i++] = 0x04;
2609 buf[i++] = 0x24;
2610 memcpy (&buf[i], &arg1, sizeof (arg1));
2611 i += 4;
2612 append_insns (&buildaddr, i, buf);
2613 current_insn_ptr = buildaddr;
2614 i386_emit_call (fn);
2615 EMIT_ASM32 (i386_void_call_2_b,
2616 "lea 0x10(%esp),%esp\n\t"
2617 /* Restore original stack top. */
2618 "pop %eax");
2619 }
2620
2621
2622 static void
2623 i386_emit_eq_goto (int *offset_p, int *size_p)
2624 {
2625 EMIT_ASM32 (eq,
2626 /* Check the low half first; it's more likely to be the decider. */
2627 "cmpl %eax,(%esp)\n\t"
2628 "jne .Leq_fallthru\n\t"
2629 "cmpl %ebx,4(%esp)\n\t"
2630 "jne .Leq_fallthru\n\t"
2631 "lea 0x8(%esp),%esp\n\t"
2632 "pop %eax\n\t"
2633 "pop %ebx\n\t"
2634 /* jmp, but don't trust the assembler to choose the right jump */
2635 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2636 ".Leq_fallthru:\n\t"
2637 "lea 0x8(%esp),%esp\n\t"
2638 "pop %eax\n\t"
2639 "pop %ebx");
2640
2641 if (offset_p)
2642 *offset_p = 18;
2643 if (size_p)
2644 *size_p = 4;
2645 }
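/* Byte accounting for the 18, assuming short (rel8) conditional jumps:

       39 04 24         cmpl %eax,(%esp)           3 bytes
       75 xx            jne .Leq_fallthru          2 bytes
       39 5c 24 04      cmpl %ebx,4(%esp)          4 bytes
       75 xx            jne .Leq_fallthru          2 bytes
       8d 64 24 08      lea 0x8(%esp),%esp         4 bytes
       58               pop %eax                   1 byte
       5b               pop %ebx                   1 byte
       e9               jmp opcode                 1 byte
                                                  -- 18 bytes

   so the patched displacement starts at offset 18.  i386_emit_ne_goto
   has the same total; the lt/le/gt/ge variants report 20 because their
   compare-and-branch ladder is two bytes longer.  */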
2646
2647 static void
2648 i386_emit_ne_goto (int *offset_p, int *size_p)
2649 {
2650 EMIT_ASM32 (ne,
2651 /* Check the low half first; it's more likely to be the decider. */
2652 "cmpl %eax,(%esp)\n\t"
2653 "jne .Lne_jump\n\t"
2654 "cmpl %ebx,4(%esp)\n\t"
2655 "je .Lne_fallthru\n\t"
2656 ".Lne_jump:\n\t"
2657 "lea 0x8(%esp),%esp\n\t"
2658 "pop %eax\n\t"
2659 "pop %ebx\n\t"
2660 /* jmp, but don't trust the assembler to choose the right jump */
2661 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2662 ".Lne_fallthru:\n\t"
2663 "lea 0x8(%esp),%esp\n\t"
2664 "pop %eax\n\t"
2665 "pop %ebx");
2666
2667 if (offset_p)
2668 *offset_p = 18;
2669 if (size_p)
2670 *size_p = 4;
2671 }
2672
2673 static void
2674 i386_emit_lt_goto (int *offset_p, int *size_p)
2675 {
2676 EMIT_ASM32 (lt,
2677 "cmpl %ebx,4(%esp)\n\t"
2678 "jl .Llt_jump\n\t"
2679 "jne .Llt_fallthru\n\t"
2680 "cmpl %eax,(%esp)\n\t"
2681 "jnl .Llt_fallthru\n\t"
2682 ".Llt_jump:\n\t"
2683 "lea 0x8(%esp),%esp\n\t"
2684 "pop %eax\n\t"
2685 "pop %ebx\n\t"
2686 /* jmp, but don't trust the assembler to choose the right jump */
2687 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2688 ".Llt_fallthru:\n\t"
2689 "lea 0x8(%esp),%esp\n\t"
2690 "pop %eax\n\t"
2691 "pop %ebx");
2692
2693 if (offset_p)
2694 *offset_p = 20;
2695 if (size_p)
2696 *size_p = 4;
2697 }
2698
2699 static void
2700 i386_emit_le_goto (int *offset_p, int *size_p)
2701 {
2702 EMIT_ASM32 (le,
2703 "cmpl %ebx,4(%esp)\n\t"
2704 "jle .Lle_jump\n\t"
2705 "jne .Lle_fallthru\n\t"
2706 "cmpl %eax,(%esp)\n\t"
2707 "jnle .Lle_fallthru\n\t"
2708 ".Lle_jump:\n\t"
2709 "lea 0x8(%esp),%esp\n\t"
2710 "pop %eax\n\t"
2711 "pop %ebx\n\t"
2712 /* jmp, but don't trust the assembler to choose the right jump */
2713 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2714 ".Lle_fallthru:\n\t"
2715 "lea 0x8(%esp),%esp\n\t"
2716 "pop %eax\n\t"
2717 "pop %ebx");
2718
2719 if (offset_p)
2720 *offset_p = 20;
2721 if (size_p)
2722 *size_p = 4;
2723 }
2724
2725 static void
2726 i386_emit_gt_goto (int *offset_p, int *size_p)
2727 {
2728 EMIT_ASM32 (gt,
2729 "cmpl %ebx,4(%esp)\n\t"
2730 "jg .Lgt_jump\n\t"
2731 "jne .Lgt_fallthru\n\t"
2732 "cmpl %eax,(%esp)\n\t"
2733 "jng .Lgt_fallthru\n\t"
2734 ".Lgt_jump:\n\t"
2735 "lea 0x8(%esp),%esp\n\t"
2736 "pop %eax\n\t"
2737 "pop %ebx\n\t"
2738 /* jmp, but don't trust the assembler to choose the right jump */
2739 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2740 ".Lgt_fallthru:\n\t"
2741 "lea 0x8(%esp),%esp\n\t"
2742 "pop %eax\n\t"
2743 "pop %ebx");
2744
2745 if (offset_p)
2746 *offset_p = 20;
2747 if (size_p)
2748 *size_p = 4;
2749 }
2750
2751 static void
2752 i386_emit_ge_goto (int *offset_p, int *size_p)
2753 {
2754 EMIT_ASM32 (ge,
2755 "cmpl %ebx,4(%esp)\n\t"
2756 "jge .Lge_jump\n\t"
2757 "jne .Lge_fallthru\n\t"
2758 "cmpl %eax,(%esp)\n\t"
2759 "jnge .Lge_fallthru\n\t"
2760 ".Lge_jump:\n\t"
2761 "lea 0x8(%esp),%esp\n\t"
2762 "pop %eax\n\t"
2763 "pop %ebx\n\t"
2764 /* jmp, but don't trust the assembler to choose the right jump */
2765 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2766 ".Lge_fallthru:\n\t"
2767 "lea 0x8(%esp),%esp\n\t"
2768 "pop %eax\n\t"
2769 "pop %ebx");
2770
2771 if (offset_p)
2772 *offset_p = 20;
2773 if (size_p)
2774 *size_p = 4;
2775 }
2776
2777 struct emit_ops i386_emit_ops =
2778 {
2779 i386_emit_prologue,
2780 i386_emit_epilogue,
2781 i386_emit_add,
2782 i386_emit_sub,
2783 i386_emit_mul,
2784 i386_emit_lsh,
2785 i386_emit_rsh_signed,
2786 i386_emit_rsh_unsigned,
2787 i386_emit_ext,
2788 i386_emit_log_not,
2789 i386_emit_bit_and,
2790 i386_emit_bit_or,
2791 i386_emit_bit_xor,
2792 i386_emit_bit_not,
2793 i386_emit_equal,
2794 i386_emit_less_signed,
2795 i386_emit_less_unsigned,
2796 i386_emit_ref,
2797 i386_emit_if_goto,
2798 i386_emit_goto,
2799 i386_write_goto_address,
2800 i386_emit_const,
2801 i386_emit_call,
2802 i386_emit_reg,
2803 i386_emit_pop,
2804 i386_emit_stack_flush,
2805 i386_emit_zero_ext,
2806 i386_emit_swap,
2807 i386_emit_stack_adjust,
2808 i386_emit_int_call_1,
2809 i386_emit_void_call_2,
2810 i386_emit_eq_goto,
2811 i386_emit_ne_goto,
2812 i386_emit_lt_goto,
2813 i386_emit_le_goto,
2814 i386_emit_gt_goto,
2815 i386_emit_ge_goto
2816 };
2817
2818
2819 static struct emit_ops *
2820 x86_emit_ops (void)
2821 {
2822 #ifdef __x86_64__
2823 if (is_64bit_tdesc ())
2824 return &amd64_emit_ops;
2825 else
2826 #endif
2827 return &i386_emit_ops;
2828 }
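/* Note that the choice is keyed off the current thread's target
   description (is_64bit_tdesc presumably checks whether that tdesc
   describes a 64-bit process), so a 64-bit gdbserver still selects
   i386_emit_ops when compiling bytecode for a 32-bit inferior.  */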
2829
2830 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2831
2832 static const gdb_byte *
2833 x86_sw_breakpoint_from_kind (int kind, int *size)
2834 {
2835 *size = x86_breakpoint_len;
2836 return x86_breakpoint;
2837 }
2838
2839 static int
2840 x86_supports_range_stepping (void)
2841 {
2842 return 1;
2843 }
2844
2845 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2846 */
2847
2848 static int
2849 x86_supports_hardware_single_step (void)
2850 {
2851 return 1;
2852 }
2853
2854 static int
2855 x86_get_ipa_tdesc_idx (void)
2856 {
2857 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2858 const struct target_desc *tdesc = regcache->tdesc;
2859
2860 #ifdef __x86_64__
2861 return amd64_get_ipa_tdesc_idx (tdesc);
2862 #endif
2863
2864 if (tdesc == tdesc_i386_linux_no_xml)
2865 return X86_TDESC_SSE;
2866
2867 return i386_get_ipa_tdesc_idx (tdesc);
2868 }
2869
2870 /* This is initialized assuming an amd64 target.
2871 x86_arch_setup will correct it for i386 or amd64 targets. */
2872
2873 struct linux_target_ops the_low_target =
2874 {
2875 x86_arch_setup,
2876 x86_linux_regs_info,
2877 x86_cannot_fetch_register,
2878 x86_cannot_store_register,
2879 NULL, /* fetch_register */
2880 x86_get_pc,
2881 x86_set_pc,
2882 NULL, /* breakpoint_kind_from_pc */
2883 x86_sw_breakpoint_from_kind,
2884 NULL,
2885 1,
2886 x86_breakpoint_at,
2887 x86_supports_z_point_type,
2888 x86_insert_point,
2889 x86_remove_point,
2890 x86_stopped_by_watchpoint,
2891 x86_stopped_data_address,
2892 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2893 native i386 case (no registers smaller than an xfer unit), and are not
2894 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2895 NULL,
2896 NULL,
2897 /* Need to fix up i386 siginfo if the host is amd64. */
2898 x86_siginfo_fixup,
2899 x86_linux_new_process,
2900 x86_linux_delete_process,
2901 x86_linux_new_thread,
2902 x86_linux_delete_thread,
2903 x86_linux_new_fork,
2904 x86_linux_prepare_to_resume,
2905 x86_linux_process_qsupported,
2906 x86_supports_tracepoints,
2907 x86_get_thread_area,
2908 x86_install_fast_tracepoint_jump_pad,
2909 x86_emit_ops,
2910 x86_get_min_fast_tracepoint_insn_len,
2911 x86_supports_range_stepping,
2912 NULL, /* breakpoint_kind_from_current_state */
2913 x86_supports_hardware_single_step,
2914 x86_get_syscall_trapinfo,
2915 x86_get_ipa_tdesc_idx,
2916 };
2917
2918 /* The linux target ops object. */
2919
2920 linux_process_target *the_linux_target = &the_x86_target;
2921
2922 void
2923 initialize_low_arch (void)
2924 {
2925 /* Initialize the Linux target descriptions. */
2926 #ifdef __x86_64__
2927 tdesc_amd64_linux_no_xml = allocate_target_description ();
2928 copy_target_description (tdesc_amd64_linux_no_xml,
2929 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2930 false));
2931 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2932 #endif
2933
2934 tdesc_i386_linux_no_xml = allocate_target_description ();
2935 copy_target_description (tdesc_i386_linux_no_xml,
2936 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2937 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2938
2939 initialize_regsets_info (&x86_regsets_info);
2940 }