1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
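/* jump_insn is a 5-byte 32-bit relative jump: opcode 0xe9 followed by
   a rel32 displacement.  small_jump_insn is the same jump with an
   operand-size prefix (0x66 0xe9 + rel16), 4 bytes in total.  The zero
   displacement bytes are patched in when the jump pads are built
   below.  */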
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include "nat/gdb_ptrace.h"
76 #include <sys/uio.h>
77
78 #ifndef PTRACE_GET_THREAD_AREA
79 #define PTRACE_GET_THREAD_AREA 25
80 #endif
81
82 /* This definition comes from prctl.h, but some kernels may not have it. */
83 #ifndef PTRACE_ARCH_PRCTL
84 #define PTRACE_ARCH_PRCTL 30
85 #endif
86
87 /* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89 #ifndef ARCH_GET_FS
90 #define ARCH_SET_GS 0x1001
91 #define ARCH_SET_FS 0x1002
92 #define ARCH_GET_FS 0x1003
93 #define ARCH_GET_GS 0x1004
94 #endif
95
96 /* Per-process arch-specific data we want to keep. */
97
98 struct arch_process_info
99 {
100 struct x86_debug_reg_state debug_reg_state;
101 };
102
103 #ifdef __x86_64__
104
105 /* Mapping between the general-purpose registers in `struct user'
106 format and GDB's register array layout.
107 Note that the transfer layout uses 64-bit regs. */
108 static /*const*/ int i386_regmap[] =
109 {
110 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
111 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
112 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
113 DS * 8, ES * 8, FS * 8, GS * 8
114 };
115
116 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
117
 118 /* So that the code below doesn't have to care whether it is i386 or amd64. */
119 #define ORIG_EAX ORIG_RAX
120 #define REGSIZE 8
121
122 static const int x86_64_regmap[] =
123 {
124 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
125 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
126 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
127 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
128 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
129 DS * 8, ES * 8, FS * 8, GS * 8,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1, -1, -1, -1, -1, -1, -1, -1,
133 -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 ORIG_RAX * 8,
136 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
137 21 * 8, 22 * 8,
138 #else
139 -1, -1,
140 #endif
141 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
142 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
143 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
146 -1, -1, -1, -1, -1, -1, -1, -1,
147 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1, -1, -1, -1, -1, -1, -1, -1,
152 -1 /* pkru */
153 };
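/* A value of -1 above marks a register that is not transferred through
   this GETREGS-style layout at all; such registers are supplied and
   collected through the other regsets instead (for example the
   XSAVE-based NT_X86_XSTATE regset), or handled specially like
   fs_base/gs_base on kernels lacking them in user_regs_struct.  */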
154
155 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
156 #define X86_64_USER_REGS (GS + 1)
157
158 #else /* ! __x86_64__ */
159
160 /* Mapping between the general-purpose registers in `struct user'
161 format and GDB's register array layout. */
162 static /*const*/ int i386_regmap[] =
163 {
164 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
165 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
166 EIP * 4, EFL * 4, CS * 4, SS * 4,
167 DS * 4, ES * 4, FS * 4, GS * 4
168 };
169
170 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
171
172 #define REGSIZE 4
173
174 #endif
175
176 #ifdef __x86_64__
177
 178 /* Returns true if the current inferior belongs to an x86-64 process,
 179 per the tdesc. */
180
181 static int
182 is_64bit_tdesc (void)
183 {
184 struct regcache *regcache = get_thread_regcache (current_thread, 0);
185
186 return register_size (regcache->tdesc, 0) == 8;
187 }
188
189 #endif
190
191 \f
192 /* Called by libthread_db. */
193
194 ps_err_e
195 ps_get_thread_area (struct ps_prochandle *ph,
196 lwpid_t lwpid, int idx, void **base)
197 {
198 #ifdef __x86_64__
199 int use_64bit = is_64bit_tdesc ();
200
201 if (use_64bit)
202 {
203 switch (idx)
204 {
205 case FS:
206 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
207 return PS_OK;
208 break;
209 case GS:
210 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
211 return PS_OK;
212 break;
213 default:
214 return PS_BADADDR;
215 }
216 return PS_ERR;
217 }
218 #endif
219
220 {
221 unsigned int desc[4];
222
223 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
224 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
225 return PS_ERR;
226
227 /* Ensure we properly extend the value to 64-bits for x86_64. */
228 *base = (void *) (uintptr_t) desc[1];
229 return PS_OK;
230 }
231 }
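/* Note on the PTRACE_GET_THREAD_AREA request used above and below: the
   kernel fills in a struct user_desc (see asm/ldt.h), whose second
   32-bit word is the segment base address.  That is why desc[1] is the
   value handed back as the thread-area base.  */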
232
233 /* Get the thread area address. This is used to recognize which
234 thread is which when tracing with the in-process agent library. We
235 don't read anything from the address, and treat it as opaque; it's
236 the address itself that we assume is unique per-thread. */
237
238 static int
239 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
240 {
241 #ifdef __x86_64__
242 int use_64bit = is_64bit_tdesc ();
243
244 if (use_64bit)
245 {
246 void *base;
247 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
248 {
249 *addr = (CORE_ADDR) (uintptr_t) base;
250 return 0;
251 }
252
253 return -1;
254 }
255 #endif
256
257 {
258 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
259 struct thread_info *thr = get_lwp_thread (lwp);
260 struct regcache *regcache = get_thread_regcache (thr, 1);
261 unsigned int desc[4];
262 ULONGEST gs = 0;
 263 const int reg_thread_area = 3; /* Shift off the selector's RPL/TI bits to get the GDT index. */
264 int idx;
265
266 collect_register_by_name (regcache, "gs", &gs);
267
268 idx = gs >> reg_thread_area;
269
270 if (ptrace (PTRACE_GET_THREAD_AREA,
271 lwpid_of (thr),
272 (void *) (long) idx, (unsigned long) &desc) < 0)
273 return -1;
274
275 *addr = desc[1];
276 return 0;
277 }
278 }
279
280
281 \f
282 static int
283 x86_cannot_store_register (int regno)
284 {
285 #ifdef __x86_64__
286 if (is_64bit_tdesc ())
287 return 0;
288 #endif
289
290 return regno >= I386_NUM_REGS;
291 }
292
293 static int
294 x86_cannot_fetch_register (int regno)
295 {
296 #ifdef __x86_64__
297 if (is_64bit_tdesc ())
298 return 0;
299 #endif
300
301 return regno >= I386_NUM_REGS;
302 }
303
304 static void
305 x86_fill_gregset (struct regcache *regcache, void *buf)
306 {
307 int i;
308
309 #ifdef __x86_64__
310 if (register_size (regcache->tdesc, 0) == 8)
311 {
312 for (i = 0; i < X86_64_NUM_REGS; i++)
313 if (x86_64_regmap[i] != -1)
314 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
315
316 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
317 {
318 unsigned long base;
319 int lwpid = lwpid_of (current_thread);
320
321 collect_register_by_name (regcache, "fs_base", &base);
322 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
323
324 collect_register_by_name (regcache, "gs_base", &base);
325 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
326 }
327 #endif
328
329 return;
330 }
331
332 /* 32-bit inferior registers need to be zero-extended.
333 Callers would read uninitialized memory otherwise. */
334 memset (buf, 0x00, X86_64_USER_REGS * 8);
335 #endif
336
337 for (i = 0; i < I386_NUM_REGS; i++)
338 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
339
340 collect_register_by_name (regcache, "orig_eax",
341 ((char *) buf) + ORIG_EAX * REGSIZE);
342 }
343
344 static void
345 x86_store_gregset (struct regcache *regcache, const void *buf)
346 {
347 int i;
348
349 #ifdef __x86_64__
350 if (register_size (regcache->tdesc, 0) == 8)
351 {
352 for (i = 0; i < X86_64_NUM_REGS; i++)
353 if (x86_64_regmap[i] != -1)
354 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
355
356 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
357 {
358 unsigned long base;
359 int lwpid = lwpid_of (current_thread);
360
361 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
362 supply_register_by_name (regcache, "fs_base", &base);
363
364 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
365 supply_register_by_name (regcache, "gs_base", &base);
366 }
367 #endif
368 return;
369 }
370 #endif
371
372 for (i = 0; i < I386_NUM_REGS; i++)
373 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
374
375 supply_register_by_name (regcache, "orig_eax",
376 ((char *) buf) + ORIG_EAX * REGSIZE);
377 }
378
379 static void
380 x86_fill_fpregset (struct regcache *regcache, void *buf)
381 {
382 #ifdef __x86_64__
383 i387_cache_to_fxsave (regcache, buf);
384 #else
385 i387_cache_to_fsave (regcache, buf);
386 #endif
387 }
388
389 static void
390 x86_store_fpregset (struct regcache *regcache, const void *buf)
391 {
392 #ifdef __x86_64__
393 i387_fxsave_to_cache (regcache, buf);
394 #else
395 i387_fsave_to_cache (regcache, buf);
396 #endif
397 }
398
399 #ifndef __x86_64__
400
401 static void
402 x86_fill_fpxregset (struct regcache *regcache, void *buf)
403 {
404 i387_cache_to_fxsave (regcache, buf);
405 }
406
407 static void
408 x86_store_fpxregset (struct regcache *regcache, const void *buf)
409 {
410 i387_fxsave_to_cache (regcache, buf);
411 }
412
413 #endif
414
415 static void
416 x86_fill_xstateregset (struct regcache *regcache, void *buf)
417 {
418 i387_cache_to_xsave (regcache, buf);
419 }
420
421 static void
422 x86_store_xstateregset (struct regcache *regcache, const void *buf)
423 {
424 i387_xsave_to_cache (regcache, buf);
425 }
426
 427 /* ??? The non-biarch i386 case stores all the i387 regs twice.
 428 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
 429 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
 430 doesn't work. It would be nice to avoid the duplication in the case
 431 where it does work. Maybe the arch_setup routine could check whether
 432 it works and update the supported regsets accordingly. */
433
434 static struct regset_info x86_regsets[] =
435 {
436 #ifdef HAVE_PTRACE_GETREGS
437 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
438 GENERAL_REGS,
439 x86_fill_gregset, x86_store_gregset },
440 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
441 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
442 # ifndef __x86_64__
443 # ifdef HAVE_PTRACE_GETFPXREGS
444 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
445 EXTENDED_REGS,
446 x86_fill_fpxregset, x86_store_fpxregset },
447 # endif
448 # endif
449 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
450 FP_REGS,
451 x86_fill_fpregset, x86_store_fpregset },
452 #endif /* HAVE_PTRACE_GETREGS */
453 NULL_REGSET
454 };
455
456 static CORE_ADDR
457 x86_get_pc (struct regcache *regcache)
458 {
459 int use_64bit = register_size (regcache->tdesc, 0) == 8;
460
461 if (use_64bit)
462 {
463 uint64_t pc;
464
465 collect_register_by_name (regcache, "rip", &pc);
466 return (CORE_ADDR) pc;
467 }
468 else
469 {
470 uint32_t pc;
471
472 collect_register_by_name (regcache, "eip", &pc);
473 return (CORE_ADDR) pc;
474 }
475 }
476
477 static void
478 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
479 {
480 int use_64bit = register_size (regcache->tdesc, 0) == 8;
481
482 if (use_64bit)
483 {
484 uint64_t newpc = pc;
485
486 supply_register_by_name (regcache, "rip", &newpc);
487 }
488 else
489 {
490 uint32_t newpc = pc;
491
492 supply_register_by_name (regcache, "eip", &newpc);
493 }
494 }
495 \f
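/* 0xCC is the one-byte INT3 software breakpoint instruction.  */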
496 static const gdb_byte x86_breakpoint[] = { 0xCC };
497 #define x86_breakpoint_len 1
498
499 static int
500 x86_breakpoint_at (CORE_ADDR pc)
501 {
502 unsigned char c;
503
504 (*the_target->read_memory) (pc, &c, 1);
505 if (c == 0xCC)
506 return 1;
507
508 return 0;
509 }
510 \f
511 /* Low-level function vector. */
512 struct x86_dr_low_type x86_dr_low =
513 {
514 x86_linux_dr_set_control,
515 x86_linux_dr_set_addr,
516 x86_linux_dr_get_addr,
517 x86_linux_dr_get_status,
518 x86_linux_dr_get_control,
519 sizeof (void *),
520 };
521 \f
522 /* Breakpoint/Watchpoint support. */
523
524 static int
525 x86_supports_z_point_type (char z_type)
526 {
527 switch (z_type)
528 {
529 case Z_PACKET_SW_BP:
530 case Z_PACKET_HW_BP:
531 case Z_PACKET_WRITE_WP:
532 case Z_PACKET_ACCESS_WP:
533 return 1;
534 default:
535 return 0;
536 }
537 }
538
539 static int
540 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
541 int size, struct raw_breakpoint *bp)
542 {
543 struct process_info *proc = current_process ();
544
545 switch (type)
546 {
547 case raw_bkpt_type_hw:
548 case raw_bkpt_type_write_wp:
549 case raw_bkpt_type_access_wp:
550 {
551 enum target_hw_bp_type hw_type
552 = raw_bkpt_type_to_target_hw_bp_type (type);
553 struct x86_debug_reg_state *state
554 = &proc->priv->arch_private->debug_reg_state;
555
556 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
557 }
558
559 default:
560 /* Unsupported. */
561 return 1;
562 }
563 }
564
565 static int
566 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
567 int size, struct raw_breakpoint *bp)
568 {
569 struct process_info *proc = current_process ();
570
571 switch (type)
572 {
573 case raw_bkpt_type_hw:
574 case raw_bkpt_type_write_wp:
575 case raw_bkpt_type_access_wp:
576 {
577 enum target_hw_bp_type hw_type
578 = raw_bkpt_type_to_target_hw_bp_type (type);
579 struct x86_debug_reg_state *state
580 = &proc->priv->arch_private->debug_reg_state;
581
582 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
583 }
584 default:
585 /* Unsupported. */
586 return 1;
587 }
588 }
589
590 static int
591 x86_stopped_by_watchpoint (void)
592 {
593 struct process_info *proc = current_process ();
594 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
595 }
596
597 static CORE_ADDR
598 x86_stopped_data_address (void)
599 {
600 struct process_info *proc = current_process ();
601 CORE_ADDR addr;
602 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
603 &addr))
604 return addr;
605 return 0;
606 }
607 \f
608 /* Called when a new process is created. */
609
610 static struct arch_process_info *
611 x86_linux_new_process (void)
612 {
613 struct arch_process_info *info = XCNEW (struct arch_process_info);
614
615 x86_low_init_dregs (&info->debug_reg_state);
616
617 return info;
618 }
619
620 /* Called when a process is being deleted. */
621
622 static void
623 x86_linux_delete_process (struct arch_process_info *info)
624 {
625 xfree (info);
626 }
627
628 /* Target routine for linux_new_fork. */
629
630 static void
631 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
632 {
633 /* These are allocated by linux_add_process. */
634 gdb_assert (parent->priv != NULL
635 && parent->priv->arch_private != NULL);
636 gdb_assert (child->priv != NULL
637 && child->priv->arch_private != NULL);
638
 639 /* Linux kernels before 2.6.33 commit
 640 72f674d203cd230426437cdcf7dd6f681dad8b0d
 641 let the child inherit hardware debug registers from the parent
 642 on fork/vfork/clone. Newer Linux kernels create such tasks with
 643 zeroed debug registers.
 644
 645 GDB core assumes the child inherits the watchpoints/hw
 646 breakpoints of the parent, and will remove them all from the
 647 forked-off process. Copy the debug register mirrors into the
 648 new process so that all breakpoints and watchpoints can be
 649 removed together. The debug register mirrors are zeroed in the
 650 end, before detaching the forked-off process, thus making this
 651 compatible with older Linux kernels too. */
652
653 *child->priv->arch_private = *parent->priv->arch_private;
654 }
655
656 /* See nat/x86-dregs.h. */
657
658 struct x86_debug_reg_state *
659 x86_debug_reg_state (pid_t pid)
660 {
661 struct process_info *proc = find_process_pid (pid);
662
663 return &proc->priv->arch_private->debug_reg_state;
664 }
665 \f
666 /* When GDBSERVER is built as a 64-bit application on linux, the
667 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
668 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
669 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
670 conversion in-place ourselves. */
671
672 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
673 layout of the inferiors' architecture. Returns true if any
674 conversion was done; false otherwise. If DIRECTION is 1, then copy
675 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
676 INF. */
677
678 static int
679 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
680 {
681 #ifdef __x86_64__
682 unsigned int machine;
683 int tid = lwpid_of (current_thread);
684 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
685
686 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
687 if (!is_64bit_tdesc ())
688 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
689 FIXUP_32);
690 /* No fixup for native x32 GDB. */
691 else if (!is_elf64 && sizeof (void *) == 8)
692 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
693 FIXUP_X32);
694 #endif
695
696 return 0;
697 }
698 \f
699 static int use_xml;
700
701 /* Format of XSAVE extended state is:
702 struct
703 {
704 fxsave_bytes[0..463]
705 sw_usable_bytes[464..511]
706 xstate_hdr_bytes[512..575]
707 avx_bytes[576..831]
708 future_state etc
709 };
710
711 Same memory layout will be used for the coredump NT_X86_XSTATE
712 representing the XSAVE extended state registers.
713
 714 The first 8 bytes of the sw_usable_bytes[464..471] are the OS-enabled
715 extended state mask, which is the same as the extended control register
716 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
717 together with the mask saved in the xstate_hdr_bytes to determine what
718 states the processor/OS supports and what state, used or initialized,
719 the process/thread is in. */
720 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
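/* The layout described above could be written out roughly as the
   struct below.  This is an illustrative sketch only -- it is not used
   by the code in this file and is not an ABI definition; the area past
   the XSAVE header is feature-dependent.  */

struct illustrative_xsave_layout
{
  unsigned char fxsave[464];		/* Legacy FXSAVE region (x87/SSE).  */
  unsigned long long xcr0;		/* sw_usable_bytes[464..471]: copy of XCR0.  */
  unsigned char sw_reserved[40];	/* Rest of the software-usable area.  */
  unsigned char xstate_hdr[64];		/* XSAVE header, including the XSTATE_BV mask.  */
  unsigned char extended_area[];	/* AVX and later state, as enabled by XCR0.  */
};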
721
722 /* Does the current host support the GETFPXREGS request? The header
723 file may or may not define it, and even if it is defined, the
724 kernel will return EIO if it's running on a pre-SSE processor. */
725 int have_ptrace_getfpxregs =
726 #ifdef HAVE_PTRACE_GETFPXREGS
727 -1
728 #else
729 0
730 #endif
731 ;
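/* The value above is a tri-state: -1 means "not probed yet", 0 means
   the request is unsupported (or the header did not define it), and 1
   means the probe in x86_linux_read_description succeeded.  */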
732
733 /* Get Linux/x86 target description from running target. */
734
735 static const struct target_desc *
736 x86_linux_read_description (void)
737 {
738 unsigned int machine;
739 int is_elf64;
740 int xcr0_features;
741 int tid;
742 static uint64_t xcr0;
743 struct regset_info *regset;
744
745 tid = lwpid_of (current_thread);
746
747 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
748
749 if (sizeof (void *) == 4)
750 {
751 if (is_elf64 > 0)
752 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
753 #ifndef __x86_64__
754 else if (machine == EM_X86_64)
755 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
756 #endif
757 }
758
759 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
760 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
761 {
762 elf_fpxregset_t fpxregs;
763
764 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
765 {
766 have_ptrace_getfpxregs = 0;
767 have_ptrace_getregset = 0;
768 return i386_linux_read_description (X86_XSTATE_X87);
769 }
770 else
771 have_ptrace_getfpxregs = 1;
772 }
773 #endif
774
775 if (!use_xml)
776 {
777 x86_xcr0 = X86_XSTATE_SSE_MASK;
778
779 /* Don't use XML. */
780 #ifdef __x86_64__
781 if (machine == EM_X86_64)
782 return tdesc_amd64_linux_no_xml;
783 else
784 #endif
785 return tdesc_i386_linux_no_xml;
786 }
787
788 if (have_ptrace_getregset == -1)
789 {
790 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
791 struct iovec iov;
792
793 iov.iov_base = xstateregs;
794 iov.iov_len = sizeof (xstateregs);
795
796 /* Check if PTRACE_GETREGSET works. */
797 if (ptrace (PTRACE_GETREGSET, tid,
798 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
799 have_ptrace_getregset = 0;
800 else
801 {
802 have_ptrace_getregset = 1;
803
804 /* Get XCR0 from XSAVE extended state. */
805 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
806 / sizeof (uint64_t))];
807
808 /* Use PTRACE_GETREGSET if it is available. */
809 for (regset = x86_regsets;
810 regset->fill_function != NULL; regset++)
811 if (regset->get_request == PTRACE_GETREGSET)
812 regset->size = X86_XSTATE_SIZE (xcr0);
813 else if (regset->type != GENERAL_REGS)
814 regset->size = 0;
815 }
816 }
817
818 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
819 xcr0_features = (have_ptrace_getregset
820 && (xcr0 & X86_XSTATE_ALL_MASK));
821
822 if (xcr0_features)
823 x86_xcr0 = xcr0;
824
825 if (machine == EM_X86_64)
826 {
827 #ifdef __x86_64__
828 const target_desc *tdesc = NULL;
829
830 if (xcr0_features)
831 {
832 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
833 !is_elf64);
834 }
835
836 if (tdesc == NULL)
837 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
838 return tdesc;
839 #endif
840 }
841 else
842 {
843 const target_desc *tdesc = NULL;
844
845 if (xcr0_features)
846 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
847
848 if (tdesc == NULL)
849 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
850
851 return tdesc;
852 }
853
854 gdb_assert_not_reached ("failed to return tdesc");
855 }
856
857 /* Callback for for_each_inferior. Calls the arch_setup routine for
858 each process. */
859
860 static void
861 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
862 {
863 int pid = ptid_get_pid (entry->id);
864
 865 /* Look up any thread of this process. */
866 current_thread = find_any_thread_of_pid (pid);
867
868 the_low_target.arch_setup ();
869 }
870
 871 /* Update the target description of all processes; a new GDB has
 872 connected, and it may or may not support XML target descriptions. */
873
874 static void
875 x86_linux_update_xmltarget (void)
876 {
877 struct thread_info *saved_thread = current_thread;
878
879 /* Before changing the register cache's internal layout, flush the
880 contents of the current valid caches back to the threads, and
881 release the current regcache objects. */
882 regcache_release ();
883
884 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
885
886 current_thread = saved_thread;
887 }
888
889 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
890 PTRACE_GETREGSET. */
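/* For example, a GDB that understands x86 XML target descriptions
   sends something like "xmlRegisters=i386"; the value is a
   comma-separated list of architecture names, parsed below.  */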
891
892 static void
893 x86_linux_process_qsupported (char **features, int count)
894 {
895 int i;
896
 897 /* Assume GDB doesn't support XML unless told otherwise. If GDB sends
 898 "xmlRegisters=" with "i386" in the qSupported query, it supports x86
 899 XML target descriptions. */
900 use_xml = 0;
901 for (i = 0; i < count; i++)
902 {
903 const char *feature = features[i];
904
905 if (startswith (feature, "xmlRegisters="))
906 {
907 char *copy = xstrdup (feature + 13);
908 char *p;
909
910 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
911 {
912 if (strcmp (p, "i386") == 0)
913 {
914 use_xml = 1;
915 break;
916 }
917 }
918
919 free (copy);
920 }
921 }
922 x86_linux_update_xmltarget ();
923 }
924
925 /* Common for x86/x86-64. */
926
927 static struct regsets_info x86_regsets_info =
928 {
929 x86_regsets, /* regsets */
930 0, /* num_regsets */
931 NULL, /* disabled_regsets */
932 };
933
934 #ifdef __x86_64__
935 static struct regs_info amd64_linux_regs_info =
936 {
937 NULL, /* regset_bitmap */
938 NULL, /* usrregs_info */
939 &x86_regsets_info
940 };
941 #endif
942 static struct usrregs_info i386_linux_usrregs_info =
943 {
944 I386_NUM_REGS,
945 i386_regmap,
946 };
947
948 static struct regs_info i386_linux_regs_info =
949 {
950 NULL, /* regset_bitmap */
951 &i386_linux_usrregs_info,
952 &x86_regsets_info
953 };
954
955 const struct regs_info *
956 x86_linux_regs_info (void)
957 {
958 #ifdef __x86_64__
959 if (is_64bit_tdesc ())
960 return &amd64_linux_regs_info;
961 else
962 #endif
963 return &i386_linux_regs_info;
964 }
965
966 /* Initialize the target description for the architecture of the
967 inferior. */
968
969 static void
970 x86_arch_setup (void)
971 {
972 current_process ()->tdesc = x86_linux_read_description ();
973 }
974
 975 /* Fill *SYSNO with the number of the syscall the inferior trapped on.
 976 This should only be called if LWP got a SYSCALL_SIGTRAP. */
977
978 static void
979 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
980 {
981 int use_64bit = register_size (regcache->tdesc, 0) == 8;
982
983 if (use_64bit)
984 {
985 long l_sysno;
986
987 collect_register_by_name (regcache, "orig_rax", &l_sysno);
988 *sysno = (int) l_sysno;
989 }
990 else
991 collect_register_by_name (regcache, "orig_eax", sysno);
992 }
993
994 static int
995 x86_supports_tracepoints (void)
996 {
997 return 1;
998 }
999
1000 static void
1001 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1002 {
1003 write_inferior_memory (*to, buf, len);
1004 *to += len;
1005 }
1006
1007 static int
1008 push_opcode (unsigned char *buf, const char *op)
1009 {
1010 unsigned char *buf_org = buf;
1011
1012 while (1)
1013 {
1014 char *endptr;
1015 unsigned long ul = strtoul (op, &endptr, 16);
1016
1017 if (endptr == op)
1018 break;
1019
1020 *buf++ = ul;
1021 op = endptr;
1022 }
1023
1024 return buf - buf_org;
1025 }
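/* For example, push_opcode (buf, "48 83 ec 18") writes the four bytes
   0x48 0x83 0xec 0x18 ("sub $0x18,%rsp") into BUF and returns 4.  */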
1026
1027 #ifdef __x86_64__
1028
 1029 /* Build a jump pad that saves registers and calls a collection
 1030 function. Writes to JJUMP_PAD_INSN a jump instruction that jumps to
 1031 the jump pad; the caller is responsible for writing it in at the
 1032 tracepoint address. */
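/* Roughly, the generated pad looks like this (an illustrative summary
   of the code below, not a literal listing):

     push all GPRs, eflags and the tracepoint address    <- register block
     reserve stack space for a collecting_t object
     spin on a cmpxchg lock at LOCKADDR
     call the COLLECTOR function with TPOINT and the register block
     release the lock and pop the collecting_t object
     restore the saved registers
     <relocated copy of the original instruction>
     jmp back to TPADDR + ORIG_SIZE  */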
1033
1034 static int
1035 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1036 CORE_ADDR collector,
1037 CORE_ADDR lockaddr,
1038 ULONGEST orig_size,
1039 CORE_ADDR *jump_entry,
1040 CORE_ADDR *trampoline,
1041 ULONGEST *trampoline_size,
1042 unsigned char *jjump_pad_insn,
1043 ULONGEST *jjump_pad_insn_size,
1044 CORE_ADDR *adjusted_insn_addr,
1045 CORE_ADDR *adjusted_insn_addr_end,
1046 char *err)
1047 {
1048 unsigned char buf[40];
1049 int i, offset;
1050 int64_t loffset;
1051
1052 CORE_ADDR buildaddr = *jump_entry;
1053
1054 /* Build the jump pad. */
1055
1056 /* First, do tracepoint data collection. Save registers. */
1057 i = 0;
1058 /* Need to ensure stack pointer saved first. */
1059 buf[i++] = 0x54; /* push %rsp */
1060 buf[i++] = 0x55; /* push %rbp */
1061 buf[i++] = 0x57; /* push %rdi */
1062 buf[i++] = 0x56; /* push %rsi */
1063 buf[i++] = 0x52; /* push %rdx */
1064 buf[i++] = 0x51; /* push %rcx */
1065 buf[i++] = 0x53; /* push %rbx */
1066 buf[i++] = 0x50; /* push %rax */
1067 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1068 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1069 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1070 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1071 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1072 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1073 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1074 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1075 buf[i++] = 0x9c; /* pushfq */
1076 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1077 buf[i++] = 0xbf;
1078 memcpy (buf + i, &tpaddr, 8);
1079 i += 8;
1080 buf[i++] = 0x57; /* push %rdi */
1081 append_insns (&buildaddr, i, buf);
1082
1083 /* Stack space for the collecting_t object. */
1084 i = 0;
1085 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1086 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1087 memcpy (buf + i, &tpoint, 8);
1088 i += 8;
1089 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1090 i += push_opcode (&buf[i],
1091 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1092 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1093 append_insns (&buildaddr, i, buf);
1094
1095 /* spin-lock. */
1096 i = 0;
 1097 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1098 memcpy (&buf[i], (void *) &lockaddr, 8);
1099 i += 8;
1100 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1101 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1102 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1103 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1104 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1105 append_insns (&buildaddr, i, buf);
1106
1107 /* Set up the gdb_collect call. */
1108 /* At this point, (stack pointer + 0x18) is the base of our saved
1109 register block. */
1110
1111 i = 0;
1112 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1113 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1114
1115 /* tpoint address may be 64-bit wide. */
 1116 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1117 memcpy (buf + i, &tpoint, 8);
1118 i += 8;
1119 append_insns (&buildaddr, i, buf);
1120
 1121 /* The collector function, being in the shared library, may be more
 1122 than 31 bits away from the jump pad. */
1123 i = 0;
1124 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1125 memcpy (buf + i, &collector, 8);
1126 i += 8;
1127 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1128 append_insns (&buildaddr, i, buf);
1129
1130 /* Clear the spin-lock. */
1131 i = 0;
1132 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1133 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1134 memcpy (buf + i, &lockaddr, 8);
1135 i += 8;
1136 append_insns (&buildaddr, i, buf);
1137
1138 /* Remove stack that had been used for the collect_t object. */
1139 i = 0;
1140 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1141 append_insns (&buildaddr, i, buf);
1142
1143 /* Restore register state. */
1144 i = 0;
1145 buf[i++] = 0x48; /* add $0x8,%rsp */
1146 buf[i++] = 0x83;
1147 buf[i++] = 0xc4;
1148 buf[i++] = 0x08;
1149 buf[i++] = 0x9d; /* popfq */
1150 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1151 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1152 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1153 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1154 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1155 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1156 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1157 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1158 buf[i++] = 0x58; /* pop %rax */
1159 buf[i++] = 0x5b; /* pop %rbx */
1160 buf[i++] = 0x59; /* pop %rcx */
1161 buf[i++] = 0x5a; /* pop %rdx */
1162 buf[i++] = 0x5e; /* pop %rsi */
1163 buf[i++] = 0x5f; /* pop %rdi */
1164 buf[i++] = 0x5d; /* pop %rbp */
1165 buf[i++] = 0x5c; /* pop %rsp */
1166 append_insns (&buildaddr, i, buf);
1167
1168 /* Now, adjust the original instruction to execute in the jump
1169 pad. */
1170 *adjusted_insn_addr = buildaddr;
1171 relocate_instruction (&buildaddr, tpaddr);
1172 *adjusted_insn_addr_end = buildaddr;
1173
1174 /* Finally, write a jump back to the program. */
1175
1176 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1177 if (loffset > INT_MAX || loffset < INT_MIN)
1178 {
1179 sprintf (err,
1180 "E.Jump back from jump pad too far from tracepoint "
1181 "(offset 0x%" PRIx64 " > int32).", loffset);
1182 return 1;
1183 }
1184
1185 offset = (int) loffset;
1186 memcpy (buf, jump_insn, sizeof (jump_insn));
1187 memcpy (buf + 1, &offset, 4);
1188 append_insns (&buildaddr, sizeof (jump_insn), buf);
1189
1190 /* The jump pad is now built. Wire in a jump to our jump pad. This
1191 is always done last (by our caller actually), so that we can
1192 install fast tracepoints with threads running. This relies on
1193 the agent's atomic write support. */
1194 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1195 if (loffset > INT_MAX || loffset < INT_MIN)
1196 {
1197 sprintf (err,
1198 "E.Jump pad too far from tracepoint "
1199 "(offset 0x%" PRIx64 " > int32).", loffset);
1200 return 1;
1201 }
1202
1203 offset = (int) loffset;
1204
1205 memcpy (buf, jump_insn, sizeof (jump_insn));
1206 memcpy (buf + 1, &offset, 4);
1207 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1208 *jjump_pad_insn_size = sizeof (jump_insn);
1209
1210 /* Return the end address of our pad. */
1211 *jump_entry = buildaddr;
1212
1213 return 0;
1214 }
1215
1216 #endif /* __x86_64__ */
1217
 1218 /* Build a jump pad that saves registers and calls a collection
 1219 function. Writes to JJUMP_PAD_INSN a jump instruction that jumps to
 1220 the jump pad; the caller is responsible for writing it in at the
 1221 tracepoint address. */
1222
1223 static int
1224 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1225 CORE_ADDR collector,
1226 CORE_ADDR lockaddr,
1227 ULONGEST orig_size,
1228 CORE_ADDR *jump_entry,
1229 CORE_ADDR *trampoline,
1230 ULONGEST *trampoline_size,
1231 unsigned char *jjump_pad_insn,
1232 ULONGEST *jjump_pad_insn_size,
1233 CORE_ADDR *adjusted_insn_addr,
1234 CORE_ADDR *adjusted_insn_addr_end,
1235 char *err)
1236 {
1237 unsigned char buf[0x100];
1238 int i, offset;
1239 CORE_ADDR buildaddr = *jump_entry;
1240
1241 /* Build the jump pad. */
1242
1243 /* First, do tracepoint data collection. Save registers. */
1244 i = 0;
1245 buf[i++] = 0x60; /* pushad */
1246 buf[i++] = 0x68; /* push tpaddr aka $pc */
1247 *((int *)(buf + i)) = (int) tpaddr;
1248 i += 4;
1249 buf[i++] = 0x9c; /* pushf */
1250 buf[i++] = 0x1e; /* push %ds */
1251 buf[i++] = 0x06; /* push %es */
1252 buf[i++] = 0x0f; /* push %fs */
1253 buf[i++] = 0xa0;
1254 buf[i++] = 0x0f; /* push %gs */
1255 buf[i++] = 0xa8;
1256 buf[i++] = 0x16; /* push %ss */
1257 buf[i++] = 0x0e; /* push %cs */
1258 append_insns (&buildaddr, i, buf);
1259
1260 /* Stack space for the collecting_t object. */
1261 i = 0;
1262 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1263
1264 /* Build the object. */
1265 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1266 memcpy (buf + i, &tpoint, 4);
1267 i += 4;
1268 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1269
1270 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1271 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1272 append_insns (&buildaddr, i, buf);
1273
 1274 /* Spin-lock. Note this uses cmpxchg, which is not available on the
 1275 original i386. If we cared about that, this could use xchg instead. */
1276
1277 i = 0;
1278 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1279 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1280 %esp,<lockaddr> */
1281 memcpy (&buf[i], (void *) &lockaddr, 4);
1282 i += 4;
1283 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1284 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1285 append_insns (&buildaddr, i, buf);
1286
1287
1288 /* Set up arguments to the gdb_collect call. */
1289 i = 0;
1290 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1291 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1292 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1293 append_insns (&buildaddr, i, buf);
1294
1295 i = 0;
1296 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1297 append_insns (&buildaddr, i, buf);
1298
1299 i = 0;
1300 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1301 memcpy (&buf[i], (void *) &tpoint, 4);
1302 i += 4;
1303 append_insns (&buildaddr, i, buf);
1304
1305 buf[0] = 0xe8; /* call <reladdr> */
1306 offset = collector - (buildaddr + sizeof (jump_insn));
1307 memcpy (buf + 1, &offset, 4);
1308 append_insns (&buildaddr, 5, buf);
1309 /* Clean up after the call. */
1310 buf[0] = 0x83; /* add $0x8,%esp */
1311 buf[1] = 0xc4;
1312 buf[2] = 0x08;
1313 append_insns (&buildaddr, 3, buf);
1314
1315
1316 /* Clear the spin-lock. This would need the LOCK prefix on older
1317 broken archs. */
1318 i = 0;
1319 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1320 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1321 memcpy (buf + i, &lockaddr, 4);
1322 i += 4;
1323 append_insns (&buildaddr, i, buf);
1324
1325
1326 /* Remove stack that had been used for the collect_t object. */
1327 i = 0;
1328 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1329 append_insns (&buildaddr, i, buf);
1330
1331 i = 0;
1332 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1333 buf[i++] = 0xc4;
1334 buf[i++] = 0x04;
1335 buf[i++] = 0x17; /* pop %ss */
1336 buf[i++] = 0x0f; /* pop %gs */
1337 buf[i++] = 0xa9;
1338 buf[i++] = 0x0f; /* pop %fs */
1339 buf[i++] = 0xa1;
1340 buf[i++] = 0x07; /* pop %es */
1341 buf[i++] = 0x1f; /* pop %ds */
1342 buf[i++] = 0x9d; /* popf */
1343 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1344 buf[i++] = 0xc4;
1345 buf[i++] = 0x04;
1346 buf[i++] = 0x61; /* popad */
1347 append_insns (&buildaddr, i, buf);
1348
1349 /* Now, adjust the original instruction to execute in the jump
1350 pad. */
1351 *adjusted_insn_addr = buildaddr;
1352 relocate_instruction (&buildaddr, tpaddr);
1353 *adjusted_insn_addr_end = buildaddr;
1354
1355 /* Write the jump back to the program. */
1356 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1357 memcpy (buf, jump_insn, sizeof (jump_insn));
1358 memcpy (buf + 1, &offset, 4);
1359 append_insns (&buildaddr, sizeof (jump_insn), buf);
1360
1361 /* The jump pad is now built. Wire in a jump to our jump pad. This
1362 is always done last (by our caller actually), so that we can
1363 install fast tracepoints with threads running. This relies on
1364 the agent's atomic write support. */
1365 if (orig_size == 4)
1366 {
1367 /* Create a trampoline. */
1368 *trampoline_size = sizeof (jump_insn);
1369 if (!claim_trampoline_space (*trampoline_size, trampoline))
1370 {
1371 /* No trampoline space available. */
1372 strcpy (err,
1373 "E.Cannot allocate trampoline space needed for fast "
1374 "tracepoints on 4-byte instructions.");
1375 return 1;
1376 }
1377
1378 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1379 memcpy (buf, jump_insn, sizeof (jump_insn));
1380 memcpy (buf + 1, &offset, 4);
1381 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1382
1383 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1384 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1385 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1386 memcpy (buf + 2, &offset, 2);
1387 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1388 *jjump_pad_insn_size = sizeof (small_jump_insn);
1389 }
1390 else
1391 {
1392 /* Else use a 32-bit relative jump instruction. */
1393 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1394 memcpy (buf, jump_insn, sizeof (jump_insn));
1395 memcpy (buf + 1, &offset, 4);
1396 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1397 *jjump_pad_insn_size = sizeof (jump_insn);
1398 }
1399
1400 /* Return the end address of our pad. */
1401 *jump_entry = buildaddr;
1402
1403 return 0;
1404 }
1405
1406 static int
1407 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1408 CORE_ADDR collector,
1409 CORE_ADDR lockaddr,
1410 ULONGEST orig_size,
1411 CORE_ADDR *jump_entry,
1412 CORE_ADDR *trampoline,
1413 ULONGEST *trampoline_size,
1414 unsigned char *jjump_pad_insn,
1415 ULONGEST *jjump_pad_insn_size,
1416 CORE_ADDR *adjusted_insn_addr,
1417 CORE_ADDR *adjusted_insn_addr_end,
1418 char *err)
1419 {
1420 #ifdef __x86_64__
1421 if (is_64bit_tdesc ())
1422 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1423 collector, lockaddr,
1424 orig_size, jump_entry,
1425 trampoline, trampoline_size,
1426 jjump_pad_insn,
1427 jjump_pad_insn_size,
1428 adjusted_insn_addr,
1429 adjusted_insn_addr_end,
1430 err);
1431 #endif
1432
1433 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1434 collector, lockaddr,
1435 orig_size, jump_entry,
1436 trampoline, trampoline_size,
1437 jjump_pad_insn,
1438 jjump_pad_insn_size,
1439 adjusted_insn_addr,
1440 adjusted_insn_addr_end,
1441 err);
1442 }
1443
1444 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1445 architectures. */
1446
1447 static int
1448 x86_get_min_fast_tracepoint_insn_len (void)
1449 {
1450 static int warned_about_fast_tracepoints = 0;
1451
1452 #ifdef __x86_64__
1453 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1454 used for fast tracepoints. */
1455 if (is_64bit_tdesc ())
1456 return 5;
1457 #endif
1458
1459 if (agent_loaded_p ())
1460 {
1461 char errbuf[IPA_BUFSIZ];
1462
1463 errbuf[0] = '\0';
1464
1465 /* On x86, if trampolines are available, then 4-byte jump instructions
1466 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1467 with a 4-byte offset are used instead. */
1468 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1469 return 4;
1470 else
1471 {
 1472 /* GDB has no channel to explain to the user why a shorter fast
 1473 tracepoint is not possible, but at least make GDBserver
 1474 mention that something has gone awry. */
1475 if (!warned_about_fast_tracepoints)
1476 {
1477 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1478 warned_about_fast_tracepoints = 1;
1479 }
1480 return 5;
1481 }
1482 }
1483 else
1484 {
1485 /* Indicate that the minimum length is currently unknown since the IPA
1486 has not loaded yet. */
1487 return 0;
1488 }
1489 }
1490
1491 static void
1492 add_insns (unsigned char *start, int len)
1493 {
1494 CORE_ADDR buildaddr = current_insn_ptr;
1495
1496 if (debug_threads)
1497 debug_printf ("Adding %d bytes of insn at %s\n",
1498 len, paddress (buildaddr));
1499
1500 append_insns (&buildaddr, len, start);
1501 current_insn_ptr = buildaddr;
1502 }
1503
1504 /* Our general strategy for emitting code is to avoid specifying raw
1505 bytes whenever possible, and instead copy a block of inline asm
1506 that is embedded in the function. This is a little messy, because
1507 we need to keep the compiler from discarding what looks like dead
1508 code, plus suppress various warnings. */
1509
1510 #define EMIT_ASM(NAME, INSNS) \
1511 do \
1512 { \
1513 extern unsigned char start_ ## NAME, end_ ## NAME; \
1514 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1515 __asm__ ("jmp end_" #NAME "\n" \
1516 "\t" "start_" #NAME ":" \
1517 "\t" INSNS "\n" \
1518 "\t" "end_" #NAME ":"); \
1519 } while (0)
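/* For instance (illustrative only, not an emitter used in this file),
   a 64-bit bitwise-NOT emitter could be written as:

     static void
     example_emit_not (void)
     {
       EMIT_ASM (example_not, "not %rax");
     }

   The inline asm places the instruction bytes between the start_ and
   end_ labels inside gdbserver itself, and add_insns then copies those
   bytes into the code buffer being built in the inferior.  */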
1520
1521 #ifdef __x86_64__
1522
1523 #define EMIT_ASM32(NAME,INSNS) \
1524 do \
1525 { \
1526 extern unsigned char start_ ## NAME, end_ ## NAME; \
1527 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1528 __asm__ (".code32\n" \
1529 "\t" "jmp end_" #NAME "\n" \
1530 "\t" "start_" #NAME ":\n" \
1531 "\t" INSNS "\n" \
1532 "\t" "end_" #NAME ":\n" \
1533 ".code64\n"); \
1534 } while (0)
1535
1536 #else
1537
1538 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1539
1540 #endif
1541
1542 #ifdef __x86_64__
1543
1544 static void
1545 amd64_emit_prologue (void)
1546 {
1547 EMIT_ASM (amd64_prologue,
1548 "pushq %rbp\n\t"
1549 "movq %rsp,%rbp\n\t"
1550 "sub $0x20,%rsp\n\t"
1551 "movq %rdi,-8(%rbp)\n\t"
1552 "movq %rsi,-16(%rbp)");
1553 }
1554
1555
1556 static void
1557 amd64_emit_epilogue (void)
1558 {
1559 EMIT_ASM (amd64_epilogue,
1560 "movq -16(%rbp),%rdi\n\t"
1561 "movq %rax,(%rdi)\n\t"
1562 "xor %rax,%rax\n\t"
1563 "leave\n\t"
1564 "ret");
1565 }
1566
1567 static void
1568 amd64_emit_add (void)
1569 {
1570 EMIT_ASM (amd64_add,
1571 "add (%rsp),%rax\n\t"
1572 "lea 0x8(%rsp),%rsp");
1573 }
1574
1575 static void
1576 amd64_emit_sub (void)
1577 {
1578 EMIT_ASM (amd64_sub,
1579 "sub %rax,(%rsp)\n\t"
1580 "pop %rax");
1581 }
1582
1583 static void
1584 amd64_emit_mul (void)
1585 {
1586 emit_error = 1;
1587 }
1588
1589 static void
1590 amd64_emit_lsh (void)
1591 {
1592 emit_error = 1;
1593 }
1594
1595 static void
1596 amd64_emit_rsh_signed (void)
1597 {
1598 emit_error = 1;
1599 }
1600
1601 static void
1602 amd64_emit_rsh_unsigned (void)
1603 {
1604 emit_error = 1;
1605 }
1606
1607 static void
1608 amd64_emit_ext (int arg)
1609 {
1610 switch (arg)
1611 {
1612 case 8:
1613 EMIT_ASM (amd64_ext_8,
1614 "cbtw\n\t"
1615 "cwtl\n\t"
1616 "cltq");
1617 break;
1618 case 16:
1619 EMIT_ASM (amd64_ext_16,
1620 "cwtl\n\t"
1621 "cltq");
1622 break;
1623 case 32:
1624 EMIT_ASM (amd64_ext_32,
1625 "cltq");
1626 break;
1627 default:
1628 emit_error = 1;
1629 }
1630 }
1631
1632 static void
1633 amd64_emit_log_not (void)
1634 {
1635 EMIT_ASM (amd64_log_not,
1636 "test %rax,%rax\n\t"
1637 "sete %cl\n\t"
1638 "movzbq %cl,%rax");
1639 }
1640
1641 static void
1642 amd64_emit_bit_and (void)
1643 {
1644 EMIT_ASM (amd64_and,
1645 "and (%rsp),%rax\n\t"
1646 "lea 0x8(%rsp),%rsp");
1647 }
1648
1649 static void
1650 amd64_emit_bit_or (void)
1651 {
1652 EMIT_ASM (amd64_or,
1653 "or (%rsp),%rax\n\t"
1654 "lea 0x8(%rsp),%rsp");
1655 }
1656
1657 static void
1658 amd64_emit_bit_xor (void)
1659 {
1660 EMIT_ASM (amd64_xor,
1661 "xor (%rsp),%rax\n\t"
1662 "lea 0x8(%rsp),%rsp");
1663 }
1664
1665 static void
1666 amd64_emit_bit_not (void)
1667 {
1668 EMIT_ASM (amd64_bit_not,
1669 "xorq $0xffffffffffffffff,%rax");
1670 }
1671
1672 static void
1673 amd64_emit_equal (void)
1674 {
1675 EMIT_ASM (amd64_equal,
1676 "cmp %rax,(%rsp)\n\t"
1677 "je .Lamd64_equal_true\n\t"
1678 "xor %rax,%rax\n\t"
1679 "jmp .Lamd64_equal_end\n\t"
1680 ".Lamd64_equal_true:\n\t"
1681 "mov $0x1,%rax\n\t"
1682 ".Lamd64_equal_end:\n\t"
1683 "lea 0x8(%rsp),%rsp");
1684 }
1685
1686 static void
1687 amd64_emit_less_signed (void)
1688 {
1689 EMIT_ASM (amd64_less_signed,
1690 "cmp %rax,(%rsp)\n\t"
1691 "jl .Lamd64_less_signed_true\n\t"
1692 "xor %rax,%rax\n\t"
1693 "jmp .Lamd64_less_signed_end\n\t"
1694 ".Lamd64_less_signed_true:\n\t"
1695 "mov $1,%rax\n\t"
1696 ".Lamd64_less_signed_end:\n\t"
1697 "lea 0x8(%rsp),%rsp");
1698 }
1699
1700 static void
1701 amd64_emit_less_unsigned (void)
1702 {
1703 EMIT_ASM (amd64_less_unsigned,
1704 "cmp %rax,(%rsp)\n\t"
1705 "jb .Lamd64_less_unsigned_true\n\t"
1706 "xor %rax,%rax\n\t"
1707 "jmp .Lamd64_less_unsigned_end\n\t"
1708 ".Lamd64_less_unsigned_true:\n\t"
1709 "mov $1,%rax\n\t"
1710 ".Lamd64_less_unsigned_end:\n\t"
1711 "lea 0x8(%rsp),%rsp");
1712 }
1713
1714 static void
1715 amd64_emit_ref (int size)
1716 {
1717 switch (size)
1718 {
1719 case 1:
1720 EMIT_ASM (amd64_ref1,
1721 "movb (%rax),%al");
1722 break;
1723 case 2:
1724 EMIT_ASM (amd64_ref2,
1725 "movw (%rax),%ax");
1726 break;
1727 case 4:
1728 EMIT_ASM (amd64_ref4,
1729 "movl (%rax),%eax");
1730 break;
1731 case 8:
1732 EMIT_ASM (amd64_ref8,
1733 "movq (%rax),%rax");
1734 break;
1735 }
1736 }
1737
1738 static void
1739 amd64_emit_if_goto (int *offset_p, int *size_p)
1740 {
1741 EMIT_ASM (amd64_if_goto,
1742 "mov %rax,%rcx\n\t"
1743 "pop %rax\n\t"
1744 "cmp $0,%rcx\n\t"
1745 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1746 if (offset_p)
1747 *offset_p = 10;
1748 if (size_p)
1749 *size_p = 4;
1750 }
1751
1752 static void
1753 amd64_emit_goto (int *offset_p, int *size_p)
1754 {
1755 EMIT_ASM (amd64_goto,
1756 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1757 if (offset_p)
1758 *offset_p = 1;
1759 if (size_p)
1760 *size_p = 4;
1761 }
1762
1763 static void
1764 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1765 {
1766 int diff = (to - (from + size));
1767 unsigned char buf[sizeof (int)];
1768
1769 if (size != 4)
1770 {
1771 emit_error = 1;
1772 return;
1773 }
1774
1775 memcpy (buf, &diff, sizeof (int));
1776 write_inferior_memory (from, buf, sizeof (int));
1777 }
1778
1779 static void
1780 amd64_emit_const (LONGEST num)
1781 {
1782 unsigned char buf[16];
1783 int i;
1784 CORE_ADDR buildaddr = current_insn_ptr;
1785
1786 i = 0;
1787 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1788 memcpy (&buf[i], &num, sizeof (num));
1789 i += 8;
1790 append_insns (&buildaddr, i, buf);
1791 current_insn_ptr = buildaddr;
1792 }
1793
1794 static void
1795 amd64_emit_call (CORE_ADDR fn)
1796 {
1797 unsigned char buf[16];
1798 int i;
1799 CORE_ADDR buildaddr;
1800 LONGEST offset64;
1801
 1802 /* The destination function, being in the shared library, may be
 1803 more than 31 bits away from the compiled code pad. */
1804
1805 buildaddr = current_insn_ptr;
1806
1807 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1808
1809 i = 0;
1810
1811 if (offset64 > INT_MAX || offset64 < INT_MIN)
1812 {
 1813 /* Offset is too large for a direct call. Fall back to an indirect
 1814 call through a register; avoid that when possible. Use %r10: since
 1815 it is call-clobbered, we don't have to push/pop it. */
1816 buf[i++] = 0x48; /* mov $fn,%r10 */
1817 buf[i++] = 0xba;
1818 memcpy (buf + i, &fn, 8);
1819 i += 8;
1820 buf[i++] = 0xff; /* callq *%r10 */
1821 buf[i++] = 0xd2;
1822 }
1823 else
1824 {
1825 int offset32 = offset64; /* we know we can't overflow here. */
1826
1827 buf[i++] = 0xe8; /* call <reladdr> */
1828 memcpy (buf + i, &offset32, 4);
1829 i += 4;
1830 }
1831
1832 append_insns (&buildaddr, i, buf);
1833 current_insn_ptr = buildaddr;
1834 }
1835
1836 static void
1837 amd64_emit_reg (int reg)
1838 {
1839 unsigned char buf[16];
1840 int i;
1841 CORE_ADDR buildaddr;
1842
1843 /* Assume raw_regs is still in %rdi. */
1844 buildaddr = current_insn_ptr;
1845 i = 0;
1846 buf[i++] = 0xbe; /* mov $<n>,%esi */
1847 memcpy (&buf[i], &reg, sizeof (reg));
1848 i += 4;
1849 append_insns (&buildaddr, i, buf);
1850 current_insn_ptr = buildaddr;
1851 amd64_emit_call (get_raw_reg_func_addr ());
1852 }
1853
1854 static void
1855 amd64_emit_pop (void)
1856 {
1857 EMIT_ASM (amd64_pop,
1858 "pop %rax");
1859 }
1860
1861 static void
1862 amd64_emit_stack_flush (void)
1863 {
1864 EMIT_ASM (amd64_stack_flush,
1865 "push %rax");
1866 }
1867
1868 static void
1869 amd64_emit_zero_ext (int arg)
1870 {
1871 switch (arg)
1872 {
1873 case 8:
1874 EMIT_ASM (amd64_zero_ext_8,
1875 "and $0xff,%rax");
1876 break;
1877 case 16:
1878 EMIT_ASM (amd64_zero_ext_16,
1879 "and $0xffff,%rax");
1880 break;
1881 case 32:
1882 EMIT_ASM (amd64_zero_ext_32,
1883 "mov $0xffffffff,%rcx\n\t"
1884 "and %rcx,%rax");
1885 break;
1886 default:
1887 emit_error = 1;
1888 }
1889 }
1890
1891 static void
1892 amd64_emit_swap (void)
1893 {
1894 EMIT_ASM (amd64_swap,
1895 "mov %rax,%rcx\n\t"
1896 "pop %rax\n\t"
1897 "push %rcx");
1898 }
1899
1900 static void
1901 amd64_emit_stack_adjust (int n)
1902 {
1903 unsigned char buf[16];
1904 int i;
1905 CORE_ADDR buildaddr = current_insn_ptr;
1906
1907 i = 0;
1908 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1909 buf[i++] = 0x8d;
1910 buf[i++] = 0x64;
1911 buf[i++] = 0x24;
1912 /* This only handles adjustments up to 16, but we don't expect any more. */
1913 buf[i++] = n * 8;
1914 append_insns (&buildaddr, i, buf);
1915 current_insn_ptr = buildaddr;
1916 }
1917
1918 /* FN's prototype is `LONGEST(*fn)(int)'. */
1919
1920 static void
1921 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1922 {
1923 unsigned char buf[16];
1924 int i;
1925 CORE_ADDR buildaddr;
1926
1927 buildaddr = current_insn_ptr;
1928 i = 0;
1929 buf[i++] = 0xbf; /* movl $<n>,%edi */
1930 memcpy (&buf[i], &arg1, sizeof (arg1));
1931 i += 4;
1932 append_insns (&buildaddr, i, buf);
1933 current_insn_ptr = buildaddr;
1934 amd64_emit_call (fn);
1935 }
1936
1937 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1938
1939 static void
1940 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1941 {
1942 unsigned char buf[16];
1943 int i;
1944 CORE_ADDR buildaddr;
1945
1946 buildaddr = current_insn_ptr;
1947 i = 0;
1948 buf[i++] = 0xbf; /* movl $<n>,%edi */
1949 memcpy (&buf[i], &arg1, sizeof (arg1));
1950 i += 4;
1951 append_insns (&buildaddr, i, buf);
1952 current_insn_ptr = buildaddr;
1953 EMIT_ASM (amd64_void_call_2_a,
1954 /* Save away a copy of the stack top. */
1955 "push %rax\n\t"
1956 /* Also pass top as the second argument. */
1957 "mov %rax,%rsi");
1958 amd64_emit_call (fn);
1959 EMIT_ASM (amd64_void_call_2_b,
1960 /* Restore the stack top, %rax may have been trashed. */
1961 "pop %rax");
1962 }
1963
1964 void
1965 amd64_emit_eq_goto (int *offset_p, int *size_p)
1966 {
1967 EMIT_ASM (amd64_eq,
1968 "cmp %rax,(%rsp)\n\t"
1969 "jne .Lamd64_eq_fallthru\n\t"
1970 "lea 0x8(%rsp),%rsp\n\t"
1971 "pop %rax\n\t"
1972 /* jmp, but don't trust the assembler to choose the right jump */
1973 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1974 ".Lamd64_eq_fallthru:\n\t"
1975 "lea 0x8(%rsp),%rsp\n\t"
1976 "pop %rax");
1977
1978 if (offset_p)
1979 *offset_p = 13;
1980 if (size_p)
1981 *size_p = 4;
1982 }
1983
1984 void
1985 amd64_emit_ne_goto (int *offset_p, int *size_p)
1986 {
1987 EMIT_ASM (amd64_ne,
1988 "cmp %rax,(%rsp)\n\t"
1989 "je .Lamd64_ne_fallthru\n\t"
1990 "lea 0x8(%rsp),%rsp\n\t"
1991 "pop %rax\n\t"
1992 /* jmp, but don't trust the assembler to choose the right jump */
1993 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1994 ".Lamd64_ne_fallthru:\n\t"
1995 "lea 0x8(%rsp),%rsp\n\t"
1996 "pop %rax");
1997
1998 if (offset_p)
1999 *offset_p = 13;
2000 if (size_p)
2001 *size_p = 4;
2002 }
2003
2004 void
2005 amd64_emit_lt_goto (int *offset_p, int *size_p)
2006 {
2007 EMIT_ASM (amd64_lt,
2008 "cmp %rax,(%rsp)\n\t"
2009 "jnl .Lamd64_lt_fallthru\n\t"
2010 "lea 0x8(%rsp),%rsp\n\t"
2011 "pop %rax\n\t"
2012 /* jmp, but don't trust the assembler to choose the right jump */
2013 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2014 ".Lamd64_lt_fallthru:\n\t"
2015 "lea 0x8(%rsp),%rsp\n\t"
2016 "pop %rax");
2017
2018 if (offset_p)
2019 *offset_p = 13;
2020 if (size_p)
2021 *size_p = 4;
2022 }
2023
2024 void
2025 amd64_emit_le_goto (int *offset_p, int *size_p)
2026 {
2027 EMIT_ASM (amd64_le,
2028 "cmp %rax,(%rsp)\n\t"
2029 "jnle .Lamd64_le_fallthru\n\t"
2030 "lea 0x8(%rsp),%rsp\n\t"
2031 "pop %rax\n\t"
2032 /* jmp, but don't trust the assembler to choose the right jump */
2033 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2034 ".Lamd64_le_fallthru:\n\t"
2035 "lea 0x8(%rsp),%rsp\n\t"
2036 "pop %rax");
2037
2038 if (offset_p)
2039 *offset_p = 13;
2040 if (size_p)
2041 *size_p = 4;
2042 }
2043
2044 void
2045 amd64_emit_gt_goto (int *offset_p, int *size_p)
2046 {
2047 EMIT_ASM (amd64_gt,
2048 "cmp %rax,(%rsp)\n\t"
2049 "jng .Lamd64_gt_fallthru\n\t"
2050 "lea 0x8(%rsp),%rsp\n\t"
2051 "pop %rax\n\t"
2052 /* jmp, but don't trust the assembler to choose the right jump */
2053 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2054 ".Lamd64_gt_fallthru:\n\t"
2055 "lea 0x8(%rsp),%rsp\n\t"
2056 "pop %rax");
2057
2058 if (offset_p)
2059 *offset_p = 13;
2060 if (size_p)
2061 *size_p = 4;
2062 }
2063
2064 void
2065 amd64_emit_ge_goto (int *offset_p, int *size_p)
2066 {
2067 EMIT_ASM (amd64_ge,
2068 "cmp %rax,(%rsp)\n\t"
2069 "jnge .Lamd64_ge_fallthru\n\t"
2070 ".Lamd64_ge_jump:\n\t"
2071 "lea 0x8(%rsp),%rsp\n\t"
2072 "pop %rax\n\t"
2073 /* jmp, but don't trust the assembler to choose the right jump */
2074 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2075 ".Lamd64_ge_fallthru:\n\t"
2076 "lea 0x8(%rsp),%rsp\n\t"
2077 "pop %rax");
2078
2079 if (offset_p)
2080 *offset_p = 13;
2081 if (size_p)
2082 *size_p = 4;
2083 }
2084
2085 struct emit_ops amd64_emit_ops =
2086 {
2087 amd64_emit_prologue,
2088 amd64_emit_epilogue,
2089 amd64_emit_add,
2090 amd64_emit_sub,
2091 amd64_emit_mul,
2092 amd64_emit_lsh,
2093 amd64_emit_rsh_signed,
2094 amd64_emit_rsh_unsigned,
2095 amd64_emit_ext,
2096 amd64_emit_log_not,
2097 amd64_emit_bit_and,
2098 amd64_emit_bit_or,
2099 amd64_emit_bit_xor,
2100 amd64_emit_bit_not,
2101 amd64_emit_equal,
2102 amd64_emit_less_signed,
2103 amd64_emit_less_unsigned,
2104 amd64_emit_ref,
2105 amd64_emit_if_goto,
2106 amd64_emit_goto,
2107 amd64_write_goto_address,
2108 amd64_emit_const,
2109 amd64_emit_call,
2110 amd64_emit_reg,
2111 amd64_emit_pop,
2112 amd64_emit_stack_flush,
2113 amd64_emit_zero_ext,
2114 amd64_emit_swap,
2115 amd64_emit_stack_adjust,
2116 amd64_emit_int_call_1,
2117 amd64_emit_void_call_2,
2118 amd64_emit_eq_goto,
2119 amd64_emit_ne_goto,
2120 amd64_emit_lt_goto,
2121 amd64_emit_le_goto,
2122 amd64_emit_gt_goto,
2123 amd64_emit_ge_goto
2124 };
2125
2126 #endif /* __x86_64__ */
2127
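/* The 32-bit emitters below keep the 64-bit top-of-stack value split
   across %ebx:%eax (high:low); deeper stack entries occupy two 4-byte
   words each (low word at the lower address), hence the paired
   add/adc and sub/sbb sequences and the 8-byte stack adjustments.  */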
2128 static void
2129 i386_emit_prologue (void)
2130 {
2131 EMIT_ASM32 (i386_prologue,
2132 "push %ebp\n\t"
2133 "mov %esp,%ebp\n\t"
2134 "push %ebx");
2135 /* At this point, the raw regs base address is at 8(%ebp), and the
2136 value pointer is at 12(%ebp). */
2137 }
2138
2139 static void
2140 i386_emit_epilogue (void)
2141 {
2142 EMIT_ASM32 (i386_epilogue,
2143 "mov 12(%ebp),%ecx\n\t"
2144 "mov %eax,(%ecx)\n\t"
2145 "mov %ebx,0x4(%ecx)\n\t"
2146 "xor %eax,%eax\n\t"
2147 "pop %ebx\n\t"
2148 "pop %ebp\n\t"
2149 "ret");
2150 }
2151
2152 static void
2153 i386_emit_add (void)
2154 {
2155 EMIT_ASM32 (i386_add,
2156 "add (%esp),%eax\n\t"
2157 "adc 0x4(%esp),%ebx\n\t"
2158 "lea 0x8(%esp),%esp");
2159 }
2160
2161 static void
2162 i386_emit_sub (void)
2163 {
2164 EMIT_ASM32 (i386_sub,
2165 "subl %eax,(%esp)\n\t"
2166 "sbbl %ebx,4(%esp)\n\t"
2167 "pop %eax\n\t"
2168 "pop %ebx\n\t");
2169 }
2170
2171 static void
2172 i386_emit_mul (void)
2173 {
2174 emit_error = 1;
2175 }
2176
2177 static void
2178 i386_emit_lsh (void)
2179 {
2180 emit_error = 1;
2181 }
2182
2183 static void
2184 i386_emit_rsh_signed (void)
2185 {
2186 emit_error = 1;
2187 }
2188
2189 static void
2190 i386_emit_rsh_unsigned (void)
2191 {
2192 emit_error = 1;
2193 }
2194
2195 static void
2196 i386_emit_ext (int arg)
2197 {
2198 switch (arg)
2199 {
2200 case 8:
2201 EMIT_ASM32 (i386_ext_8,
2202 "cbtw\n\t"
2203 "cwtl\n\t"
2204 "movl %eax,%ebx\n\t"
2205 "sarl $31,%ebx");
2206 break;
2207 case 16:
2208 EMIT_ASM32 (i386_ext_16,
2209 "cwtl\n\t"
2210 "movl %eax,%ebx\n\t"
2211 "sarl $31,%ebx");
2212 break;
2213 case 32:
2214 EMIT_ASM32 (i386_ext_32,
2215 "movl %eax,%ebx\n\t"
2216 "sarl $31,%ebx");
2217 break;
2218 default:
2219 emit_error = 1;
2220 }
2221 }
2222
2223 static void
2224 i386_emit_log_not (void)
2225 {
2226 EMIT_ASM32 (i386_log_not,
2227 "or %ebx,%eax\n\t"
2228 "test %eax,%eax\n\t"
2229 "sete %cl\n\t"
2230 "xor %ebx,%ebx\n\t"
2231 "movzbl %cl,%eax");
2232 }
2233
2234 static void
2235 i386_emit_bit_and (void)
2236 {
2237 EMIT_ASM32 (i386_and,
2238 "and (%esp),%eax\n\t"
2239 "and 0x4(%esp),%ebx\n\t"
2240 "lea 0x8(%esp),%esp");
2241 }
2242
2243 static void
2244 i386_emit_bit_or (void)
2245 {
2246 EMIT_ASM32 (i386_or,
2247 "or (%esp),%eax\n\t"
2248 "or 0x4(%esp),%ebx\n\t"
2249 "lea 0x8(%esp),%esp");
2250 }
2251
2252 static void
2253 i386_emit_bit_xor (void)
2254 {
2255 EMIT_ASM32 (i386_xor,
2256 "xor (%esp),%eax\n\t"
2257 "xor 0x4(%esp),%ebx\n\t"
2258 "lea 0x8(%esp),%esp");
2259 }
2260
2261 static void
2262 i386_emit_bit_not (void)
2263 {
2264 EMIT_ASM32 (i386_bit_not,
2265 "xor $0xffffffff,%eax\n\t"
2266 "xor $0xffffffff,%ebx\n\t");
2267 }
2268
2269 static void
2270 i386_emit_equal (void)
2271 {
2272 EMIT_ASM32 (i386_equal,
2273 "cmpl %ebx,4(%esp)\n\t"
2274 "jne .Li386_equal_false\n\t"
2275 "cmpl %eax,(%esp)\n\t"
2276 "je .Li386_equal_true\n\t"
2277 ".Li386_equal_false:\n\t"
2278 "xor %eax,%eax\n\t"
2279 "jmp .Li386_equal_end\n\t"
2280 ".Li386_equal_true:\n\t"
2281 "mov $1,%eax\n\t"
2282 ".Li386_equal_end:\n\t"
2283 "xor %ebx,%ebx\n\t"
2284 "lea 0x8(%esp),%esp");
2285 }
2286
2287 static void
2288 i386_emit_less_signed (void)
2289 {
2290 EMIT_ASM32 (i386_less_signed,
2291 "cmpl %ebx,4(%esp)\n\t"
2292 "jl .Li386_less_signed_true\n\t"
2293 "jne .Li386_less_signed_false\n\t"
2294 "cmpl %eax,(%esp)\n\t"
2295 "jl .Li386_less_signed_true\n\t"
2296 ".Li386_less_signed_false:\n\t"
2297 "xor %eax,%eax\n\t"
2298 "jmp .Li386_less_signed_end\n\t"
2299 ".Li386_less_signed_true:\n\t"
2300 "mov $1,%eax\n\t"
2301 ".Li386_less_signed_end:\n\t"
2302 "xor %ebx,%ebx\n\t"
2303 "lea 0x8(%esp),%esp");
2304 }
2305
2306 static void
2307 i386_emit_less_unsigned (void)
2308 {
2309 EMIT_ASM32 (i386_less_unsigned,
2310 "cmpl %ebx,4(%esp)\n\t"
2311 "jb .Li386_less_unsigned_true\n\t"
2312 "jne .Li386_less_unsigned_false\n\t"
2313 "cmpl %eax,(%esp)\n\t"
2314 "jb .Li386_less_unsigned_true\n\t"
2315 ".Li386_less_unsigned_false:\n\t"
2316 "xor %eax,%eax\n\t"
2317 "jmp .Li386_less_unsigned_end\n\t"
2318 ".Li386_less_unsigned_true:\n\t"
2319 "mov $1,%eax\n\t"
2320 ".Li386_less_unsigned_end:\n\t"
2321 "xor %ebx,%ebx\n\t"
2322 "lea 0x8(%esp),%esp");
2323 }
2324
2325 static void
2326 i386_emit_ref (int size)
2327 {
2328 switch (size)
2329 {
2330 case 1:
2331 EMIT_ASM32 (i386_ref1,
2332 "movb (%eax),%al");
2333 break;
2334 case 2:
2335 EMIT_ASM32 (i386_ref2,
2336 "movw (%eax),%ax");
2337 break;
2338 case 4:
2339 EMIT_ASM32 (i386_ref4,
2340 "movl (%eax),%eax");
2341 break;
2342 case 8:
2343 EMIT_ASM32 (i386_ref8,
2344 "movl 4(%eax),%ebx\n\t"
2345 "movl (%eax),%eax");
2346 break;
2347 }
2348 }
2349
2350 static void
2351 i386_emit_if_goto (int *offset_p, int *size_p)
2352 {
2353 EMIT_ASM32 (i386_if_goto,
2354 "mov %eax,%ecx\n\t"
2355 "or %ebx,%ecx\n\t"
2356 "pop %eax\n\t"
2357 "pop %ebx\n\t"
2358 "cmpl $0,%ecx\n\t"
2359 /* Don't trust the assembler to choose the right jump */
2360 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2361
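	  /* Bytes before the 0x0f 0x85 (jne rel32) opcode:
	     mov (2) + or (2) + pop + pop (1 each) + cmpl $0,%ecx (3) = 9,
	     so the 32-bit displacement begins at offset 11.  */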
2362 if (offset_p)
2363 *offset_p = 11; /* be sure that this matches the sequence above */
2364 if (size_p)
2365 *size_p = 4;
2366 }
2367
2368 static void
2369 i386_emit_goto (int *offset_p, int *size_p)
2370 {
2371 EMIT_ASM32 (i386_goto,
2372 /* Don't trust the assembler to choose the right jump */
2373 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2374 if (offset_p)
2375 *offset_p = 1;
2376 if (size_p)
2377 *size_p = 4;
2378 }
2379
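/* Patch a previously emitted goto: FROM is the address of its 32-bit
   displacement field (see the *offset_p values returned above) and TO
   is the jump target.  The displacement is PC-relative, measured from
   the end of the field, i.e. FROM + SIZE.  */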
2380 static void
2381 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2382 {
2383 int diff = (to - (from + size));
2384 unsigned char buf[sizeof (int)];
2385
2386 /* We're only doing 4-byte sizes at the moment. */
2387 if (size != 4)
2388 {
2389 emit_error = 1;
2390 return;
2391 }
2392
2393 memcpy (buf, &diff, sizeof (int));
2394 write_inferior_memory (from, buf, sizeof (int));
2395 }
2396
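/* Load the 64-bit constant NUM into the top-of-stack pair: low word
   in %eax, high word (or zero) in %ebx.  */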
2397 static void
2398 i386_emit_const (LONGEST num)
2399 {
2400 unsigned char buf[16];
2401 int i, hi, lo;
2402 CORE_ADDR buildaddr = current_insn_ptr;
2403
2404 i = 0;
2405 buf[i++] = 0xb8; /* mov $<n>,%eax */
2406 lo = num & 0xffffffff;
2407 memcpy (&buf[i], &lo, sizeof (lo));
2408 i += 4;
2409 hi = ((num >> 32) & 0xffffffff);
2410 if (hi)
2411 {
2412 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2413 memcpy (&buf[i], &hi, sizeof (hi));
2414 i += 4;
2415 }
2416 else
2417 {
2418 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2419 }
2420 append_insns (&buildaddr, i, buf);
2421 current_insn_ptr = buildaddr;
2422 }
2423
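/* Emit a 5-byte relative call to FN; the displacement is computed
   from the end of the instruction.  FN is truncated to int, which
   assumes a 32-bit inferior address space.  */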
2424 static void
2425 i386_emit_call (CORE_ADDR fn)
2426 {
2427 unsigned char buf[16];
2428 int i, offset;
2429 CORE_ADDR buildaddr;
2430
2431 buildaddr = current_insn_ptr;
2432 i = 0;
2433 buf[i++] = 0xe8; /* call <reladdr> */
2434 offset = ((int) fn) - (buildaddr + 5);
2435 memcpy (buf + 1, &offset, 4);
2436 append_insns (&buildaddr, 5, buf);
2437 current_insn_ptr = buildaddr;
2438 }
2439
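/* Fetch raw register REG: reserve an argument slot, call the helper
   whose address get_raw_reg_func_addr () returns with (raw regs base,
   REG), and leave the result as the new top of stack in %eax with
   %ebx cleared.  */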
2440 static void
2441 i386_emit_reg (int reg)
2442 {
2443 unsigned char buf[16];
2444 int i;
2445 CORE_ADDR buildaddr;
2446
2447 EMIT_ASM32 (i386_reg_a,
2448 "sub $0x8,%esp");
2449 buildaddr = current_insn_ptr;
2450 i = 0;
2451 buf[i++] = 0xb8; /* mov $<n>,%eax */
2452 memcpy (&buf[i], &reg, sizeof (reg));
2453 i += 4;
2454 append_insns (&buildaddr, i, buf);
2455 current_insn_ptr = buildaddr;
2456 EMIT_ASM32 (i386_reg_b,
2457 "mov %eax,4(%esp)\n\t"
2458 "mov 8(%ebp),%eax\n\t"
2459 "mov %eax,(%esp)");
2460 i386_emit_call (get_raw_reg_func_addr ());
2461 EMIT_ASM32 (i386_reg_c,
2462 "xor %ebx,%ebx\n\t"
2463 "lea 0x8(%esp),%esp");
2464 }
2465
2466 static void
2467 i386_emit_pop (void)
2468 {
2469 EMIT_ASM32 (i386_pop,
2470 "pop %eax\n\t"
2471 "pop %ebx");
2472 }
2473
2474 static void
2475 i386_emit_stack_flush (void)
2476 {
2477 EMIT_ASM32 (i386_stack_flush,
2478 "push %ebx\n\t"
2479 "push %eax");
2480 }
2481
2482 static void
2483 i386_emit_zero_ext (int arg)
2484 {
2485 switch (arg)
2486 {
2487 case 8:
2488 EMIT_ASM32 (i386_zero_ext_8,
2489 "and $0xff,%eax\n\t"
2490 "xor %ebx,%ebx");
2491 break;
2492 case 16:
2493 EMIT_ASM32 (i386_zero_ext_16,
2494 "and $0xffff,%eax\n\t"
2495 "xor %ebx,%ebx");
2496 break;
2497 case 32:
2498 EMIT_ASM32 (i386_zero_ext_32,
2499 "xor %ebx,%ebx");
2500 break;
2501 default:
2502 emit_error = 1;
2503 }
2504 }
2505
2506 static void
2507 i386_emit_swap (void)
2508 {
2509 EMIT_ASM32 (i386_swap,
2510 "mov %eax,%ecx\n\t"
2511 "mov %ebx,%edx\n\t"
2512 "pop %eax\n\t"
2513 "pop %ebx\n\t"
2514 "push %edx\n\t"
2515 "push %ecx");
2516 }
2517
2518 static void
2519 i386_emit_stack_adjust (int n)
2520 {
2521 unsigned char buf[16];
2522 int i;
2523 CORE_ADDR buildaddr = current_insn_ptr;
2524
2525 i = 0;
2526 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2527 buf[i++] = 0x64;
2528 buf[i++] = 0x24;
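	  /* 8-bit displacement: each bytecode stack entry is 8 bytes (a
	     low/high pair of 32-bit words), so dropping N entries adjusts
	     %esp by N * 8.  This assumes N * 8 fits in a signed byte.  */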
2529 buf[i++] = n * 8;
2530 append_insns (&buildaddr, i, buf);
2531 current_insn_ptr = buildaddr;
2532 }
2533
2534 /* FN's prototype is `LONGEST(*fn)(int)'. */
2535
2536 static void
2537 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2538 {
2539 unsigned char buf[16];
2540 int i;
2541 CORE_ADDR buildaddr;
2542
2543 EMIT_ASM32 (i386_int_call_1_a,
2544 /* Reserve a bit of stack space. */
2545 "sub $0x8,%esp");
2546 /* Put the one argument on the stack. */
2547 buildaddr = current_insn_ptr;
2548 i = 0;
2549 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2550 buf[i++] = 0x04;
2551 buf[i++] = 0x24;
2552 memcpy (&buf[i], &arg1, sizeof (arg1));
2553 i += 4;
2554 append_insns (&buildaddr, i, buf);
2555 current_insn_ptr = buildaddr;
2556 i386_emit_call (fn);
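	  /* The LONGEST result comes back in %edx:%eax; move the high half
	     into %ebx to match the %ebx:%eax convention, then drop the
	     argument slot.  */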
2557 EMIT_ASM32 (i386_int_call_1_c,
2558 "mov %edx,%ebx\n\t"
2559 "lea 0x8(%esp),%esp");
2560 }
2561
2562 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2563
2564 static void
2565 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2566 {
2567 unsigned char buf[16];
2568 int i;
2569 CORE_ADDR buildaddr;
2570
2571 EMIT_ASM32 (i386_void_call_2_a,
2572 /* Preserve %eax only; we don't have to worry about %ebx. */
2573 "push %eax\n\t"
2574 /* Reserve a bit of stack space for arguments. */
2575 "sub $0x10,%esp\n\t"
2576 /* Copy "top" to the second argument position. (Note that
2577 		 we can't assume the function won't scribble on its
2578 arguments, so don't try to restore from this.) */
2579 "mov %eax,4(%esp)\n\t"
2580 "mov %ebx,8(%esp)");
2581 /* Put the first argument on the stack. */
2582 buildaddr = current_insn_ptr;
2583 i = 0;
2584 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2585 buf[i++] = 0x04;
2586 buf[i++] = 0x24;
2587 memcpy (&buf[i], &arg1, sizeof (arg1));
2588 i += 4;
2589 append_insns (&buildaddr, i, buf);
2590 current_insn_ptr = buildaddr;
2591 i386_emit_call (fn);
2592 EMIT_ASM32 (i386_void_call_2_b,
2593 "lea 0x10(%esp),%esp\n\t"
2594 /* Restore original stack top. */
2595 "pop %eax");
2596 }
2597
2598
2599 void
2600 i386_emit_eq_goto (int *offset_p, int *size_p)
2601 {
2602 EMIT_ASM32 (eq,
2603 	      /* Check the low half first; it is more likely to be the decider.  */
2604 "cmpl %eax,(%esp)\n\t"
2605 "jne .Leq_fallthru\n\t"
2606 "cmpl %ebx,4(%esp)\n\t"
2607 "jne .Leq_fallthru\n\t"
2608 "lea 0x8(%esp),%esp\n\t"
2609 "pop %eax\n\t"
2610 "pop %ebx\n\t"
2611 /* jmp, but don't trust the assembler to choose the right jump */
2612 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2613 ".Leq_fallthru:\n\t"
2614 "lea 0x8(%esp),%esp\n\t"
2615 "pop %eax\n\t"
2616 "pop %ebx");
2617
2618 if (offset_p)
2619 *offset_p = 18;
2620 if (size_p)
2621 *size_p = 4;
2622 }
2623
2624 void
2625 i386_emit_ne_goto (int *offset_p, int *size_p)
2626 {
2627 EMIT_ASM32 (ne,
2628 	      /* Check the low half first; it is more likely to be the decider.  */
2629 "cmpl %eax,(%esp)\n\t"
2630 "jne .Lne_jump\n\t"
2631 "cmpl %ebx,4(%esp)\n\t"
2632 "je .Lne_fallthru\n\t"
2633 ".Lne_jump:\n\t"
2634 "lea 0x8(%esp),%esp\n\t"
2635 "pop %eax\n\t"
2636 "pop %ebx\n\t"
2637 /* jmp, but don't trust the assembler to choose the right jump */
2638 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2639 ".Lne_fallthru:\n\t"
2640 "lea 0x8(%esp),%esp\n\t"
2641 "pop %eax\n\t"
2642 "pop %ebx");
2643
2644 if (offset_p)
2645 *offset_p = 18;
2646 if (size_p)
2647 *size_p = 4;
2648 }
2649
2650 void
2651 i386_emit_lt_goto (int *offset_p, int *size_p)
2652 {
2653 EMIT_ASM32 (lt,
2654 "cmpl %ebx,4(%esp)\n\t"
2655 "jl .Llt_jump\n\t"
2656 "jne .Llt_fallthru\n\t"
2657 "cmpl %eax,(%esp)\n\t"
2658 "jnl .Llt_fallthru\n\t"
2659 ".Llt_jump:\n\t"
2660 "lea 0x8(%esp),%esp\n\t"
2661 "pop %eax\n\t"
2662 "pop %ebx\n\t"
2663 /* jmp, but don't trust the assembler to choose the right jump */
2664 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2665 ".Llt_fallthru:\n\t"
2666 "lea 0x8(%esp),%esp\n\t"
2667 "pop %eax\n\t"
2668 "pop %ebx");
2669
2670 if (offset_p)
2671 *offset_p = 20;
2672 if (size_p)
2673 *size_p = 4;
2674 }
2675
2676 void
2677 i386_emit_le_goto (int *offset_p, int *size_p)
2678 {
2679 EMIT_ASM32 (le,
2680 "cmpl %ebx,4(%esp)\n\t"
2681 "jle .Lle_jump\n\t"
2682 "jne .Lle_fallthru\n\t"
2683 "cmpl %eax,(%esp)\n\t"
2684 "jnle .Lle_fallthru\n\t"
2685 ".Lle_jump:\n\t"
2686 "lea 0x8(%esp),%esp\n\t"
2687 "pop %eax\n\t"
2688 "pop %ebx\n\t"
2689 /* jmp, but don't trust the assembler to choose the right jump */
2690 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2691 ".Lle_fallthru:\n\t"
2692 "lea 0x8(%esp),%esp\n\t"
2693 "pop %eax\n\t"
2694 "pop %ebx");
2695
2696 if (offset_p)
2697 *offset_p = 20;
2698 if (size_p)
2699 *size_p = 4;
2700 }
2701
2702 void
2703 i386_emit_gt_goto (int *offset_p, int *size_p)
2704 {
2705 EMIT_ASM32 (gt,
2706 "cmpl %ebx,4(%esp)\n\t"
2707 "jg .Lgt_jump\n\t"
2708 "jne .Lgt_fallthru\n\t"
2709 "cmpl %eax,(%esp)\n\t"
2710 "jng .Lgt_fallthru\n\t"
2711 ".Lgt_jump:\n\t"
2712 "lea 0x8(%esp),%esp\n\t"
2713 "pop %eax\n\t"
2714 "pop %ebx\n\t"
2715 /* jmp, but don't trust the assembler to choose the right jump */
2716 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2717 ".Lgt_fallthru:\n\t"
2718 "lea 0x8(%esp),%esp\n\t"
2719 "pop %eax\n\t"
2720 "pop %ebx");
2721
2722 if (offset_p)
2723 *offset_p = 20;
2724 if (size_p)
2725 *size_p = 4;
2726 }
2727
2728 void
2729 i386_emit_ge_goto (int *offset_p, int *size_p)
2730 {
2731 EMIT_ASM32 (ge,
2732 "cmpl %ebx,4(%esp)\n\t"
2733 "jge .Lge_jump\n\t"
2734 "jne .Lge_fallthru\n\t"
2735 "cmpl %eax,(%esp)\n\t"
2736 "jnge .Lge_fallthru\n\t"
2737 ".Lge_jump:\n\t"
2738 "lea 0x8(%esp),%esp\n\t"
2739 "pop %eax\n\t"
2740 "pop %ebx\n\t"
2741 /* jmp, but don't trust the assembler to choose the right jump */
2742 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2743 ".Lge_fallthru:\n\t"
2744 "lea 0x8(%esp),%esp\n\t"
2745 "pop %eax\n\t"
2746 "pop %ebx");
2747
2748 if (offset_p)
2749 *offset_p = 20;
2750 if (size_p)
2751 *size_p = 4;
2752 }
2753
2754 struct emit_ops i386_emit_ops =
2755 {
2756 i386_emit_prologue,
2757 i386_emit_epilogue,
2758 i386_emit_add,
2759 i386_emit_sub,
2760 i386_emit_mul,
2761 i386_emit_lsh,
2762 i386_emit_rsh_signed,
2763 i386_emit_rsh_unsigned,
2764 i386_emit_ext,
2765 i386_emit_log_not,
2766 i386_emit_bit_and,
2767 i386_emit_bit_or,
2768 i386_emit_bit_xor,
2769 i386_emit_bit_not,
2770 i386_emit_equal,
2771 i386_emit_less_signed,
2772 i386_emit_less_unsigned,
2773 i386_emit_ref,
2774 i386_emit_if_goto,
2775 i386_emit_goto,
2776 i386_write_goto_address,
2777 i386_emit_const,
2778 i386_emit_call,
2779 i386_emit_reg,
2780 i386_emit_pop,
2781 i386_emit_stack_flush,
2782 i386_emit_zero_ext,
2783 i386_emit_swap,
2784 i386_emit_stack_adjust,
2785 i386_emit_int_call_1,
2786 i386_emit_void_call_2,
2787 i386_emit_eq_goto,
2788 i386_emit_ne_goto,
2789 i386_emit_lt_goto,
2790 i386_emit_le_goto,
2791 i386_emit_gt_goto,
2792 i386_emit_ge_goto
2793 };
2794
2795
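/* These per-arch emitters are consumed through the emit_ops vector
   returned below.  A rough sketch of how the generic agent-expression
   compiler (tracepoint.c) drives them -- illustrative only, not a
   copy of that code:

     struct emit_ops *ops = x86_emit_ops ();   /- via the_low_target -/
     int offset, size;
     CORE_ADDR goto_insn;

     ops->emit_prologue ();
     /- ...one emit hook per agent bytecode... -/
     goto_insn = current_insn_ptr;
     ops->emit_if_goto (&offset, &size);       /- target not yet known -/
     /- ...emit the fall-through arm... -/
     ops->write_goto_address (goto_insn + offset, current_insn_ptr, size);
     ops->emit_epilogue ();
*/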
2796 static struct emit_ops *
2797 x86_emit_ops (void)
2798 {
2799 #ifdef __x86_64__
2800 if (is_64bit_tdesc ())
2801 return &amd64_emit_ops;
2802 else
2803 #endif
2804 return &i386_emit_ops;
2805 }
2806
2807 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2808
2809 static const gdb_byte *
2810 x86_sw_breakpoint_from_kind (int kind, int *size)
2811 {
2812 *size = x86_breakpoint_len;
2813 return x86_breakpoint;
2814 }
2815
2816 static int
2817 x86_supports_range_stepping (void)
2818 {
2819 return 1;
2820 }
2821
2822 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2823 */
2824
2825 static int
2826 x86_supports_hardware_single_step (void)
2827 {
2828 return 1;
2829 }
2830
2831 static int
2832 x86_get_ipa_tdesc_idx (void)
2833 {
2834 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2835 const struct target_desc *tdesc = regcache->tdesc;
2836
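  /* On an x86-64 build the amd64 lookup below always returns, so the
     i386 fallback further down is only reached in 32-bit builds.  */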
2837 #ifdef __x86_64__
2838 return amd64_get_ipa_tdesc_idx (tdesc);
2839 #endif
2840
2841 if (tdesc == tdesc_i386_linux_no_xml)
2842 return X86_TDESC_SSE;
2843
2844 return i386_get_ipa_tdesc_idx (tdesc);
2845 }
2846
2847 /* This is initialized assuming an amd64 target.
2848 x86_arch_setup will correct it for i386 or amd64 targets. */
2849
2850 struct linux_target_ops the_low_target =
2851 {
2852 x86_arch_setup,
2853 x86_linux_regs_info,
2854 x86_cannot_fetch_register,
2855 x86_cannot_store_register,
2856 NULL, /* fetch_register */
2857 x86_get_pc,
2858 x86_set_pc,
2859 NULL, /* breakpoint_kind_from_pc */
2860 x86_sw_breakpoint_from_kind,
2861 NULL,
2862 1,
2863 x86_breakpoint_at,
2864 x86_supports_z_point_type,
2865 x86_insert_point,
2866 x86_remove_point,
2867 x86_stopped_by_watchpoint,
2868 x86_stopped_data_address,
2869 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2870 native i386 case (no registers smaller than an xfer unit), and are not
2871 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2872 NULL,
2873 NULL,
2874 /* need to fix up i386 siginfo if host is amd64 */
2875 x86_siginfo_fixup,
2876 x86_linux_new_process,
2877 x86_linux_delete_process,
2878 x86_linux_new_thread,
2879 x86_linux_delete_thread,
2880 x86_linux_new_fork,
2881 x86_linux_prepare_to_resume,
2882 x86_linux_process_qsupported,
2883 x86_supports_tracepoints,
2884 x86_get_thread_area,
2885 x86_install_fast_tracepoint_jump_pad,
2886 x86_emit_ops,
2887 x86_get_min_fast_tracepoint_insn_len,
2888 x86_supports_range_stepping,
2889 NULL, /* breakpoint_kind_from_current_state */
2890 x86_supports_hardware_single_step,
2891 x86_get_syscall_trapinfo,
2892 x86_get_ipa_tdesc_idx,
2893 };
2894
2895 void
2896 initialize_low_arch (void)
2897 {
2898 /* Initialize the Linux target descriptions. */
2899 #ifdef __x86_64__
2900 tdesc_amd64_linux_no_xml = allocate_target_description ();
2901 copy_target_description (tdesc_amd64_linux_no_xml,
2902 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2903 false));
2904 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2905 #endif
2906
2907 #if GDB_SELF_TEST
2908 initialize_low_tdesc ();
2909 #endif
2910
2911 tdesc_i386_linux_no_xml = allocate_target_description ();
2912 copy_target_description (tdesc_i386_linux_no_xml,
2913 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2914 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2915
2916 initialize_regsets_info (&x86_regsets_info);
2917 }