amd64-linux: expose system register FS_BASE and GS_BASE for Linux.
gdb/gdbserver/linux-x86-low.c (deliverable/binutils-gdb.git)
1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };	/* jmp rel32 */
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };	/* jmp rel16 (0x66 prefix) */
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include "nat/gdb_ptrace.h"
76 #include <sys/uio.h>
77
78 #ifndef PTRACE_GET_THREAD_AREA
79 #define PTRACE_GET_THREAD_AREA 25
80 #endif
81
82 /* This definition comes from prctl.h, but some kernels may not have it. */
83 #ifndef PTRACE_ARCH_PRCTL
84 #define PTRACE_ARCH_PRCTL 30
85 #endif
86
87 /* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89 #ifndef ARCH_GET_FS
90 #define ARCH_SET_GS 0x1001
91 #define ARCH_SET_FS 0x1002
92 #define ARCH_GET_FS 0x1003
93 #define ARCH_GET_GS 0x1004
94 #endif
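/* With PTRACE_ARCH_PRCTL, the ptrace `addr' argument is the address
   where the base value is stored or read, and the `data' argument is
   the ARCH_* sub-function code.  A minimal sketch of reading a
   thread's FS base, the same pattern used by the code below:

     unsigned long base;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
       ... base now holds the tracee's FS base address ...  */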
95
96 /* Per-process arch-specific data we want to keep. */
97
98 struct arch_process_info
99 {
100 struct x86_debug_reg_state debug_reg_state;
101 };
102
103 #ifdef __x86_64__
104
105 /* Mapping between the general-purpose registers in `struct user'
106 format and GDB's register array layout.
107 Note that the transfer layout uses 64-bit regs. */
108 static /*const*/ int i386_regmap[] =
109 {
110 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
111 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
112 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
113 DS * 8, ES * 8, FS * 8, GS * 8
114 };
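/* For example, GDB register 0 of the 32-bit description (%eax) is
   transferred at byte offset RAX * 8 of the 64-bit `struct user'
   register block, which is why these offsets are scaled by 8 even
   though the inferior is 32-bit.  */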
115
116 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
117
118 /* So that the code below doesn't have to care whether it's i386 or amd64. */
119 #define ORIG_EAX ORIG_RAX
120 #define REGSIZE 8
121
122 static const int x86_64_regmap[] =
123 {
124 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
125 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
126 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
127 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
128 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
129 DS * 8, ES * 8, FS * 8, GS * 8,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1, -1, -1, -1, -1, -1, -1, -1,
133 -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 ORIG_RAX * 8,
136 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
137 21 * 8, 22 * 8,
138 #else
139 -1, -1,
140 #endif
141 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
142 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
143 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
146 -1, -1, -1, -1, -1, -1, -1, -1,
147 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1, -1, -1, -1, -1, -1, -1, -1
152 };
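/* The two conditional entries above are the fs_base and gs_base
   registers that this file exposes.  21 * 8 and 22 * 8 are the byte
   offsets of the fs_base and gs_base members of the 64-bit `struct
   user_regs_struct' (the slots immediately after ss), so when
   HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE is defined they travel with
   the rest of the gregset; otherwise they are read and written with
   PTRACE_ARCH_PRCTL in x86_store_gregset/x86_fill_gregset below.
   As a sketch, the offsets could be sanity-checked against
   <sys/user.h> with something like:

     offsetof (struct user_regs_struct, fs_base) == 21 * 8
     offsetof (struct user_regs_struct, gs_base) == 22 * 8  */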
153
154 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
155 #define X86_64_USER_REGS (GS + 1)
156
157 #else /* ! __x86_64__ */
158
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout. */
161 static /*const*/ int i386_regmap[] =
162 {
163 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
164 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
165 EIP * 4, EFL * 4, CS * 4, SS * 4,
166 DS * 4, ES * 4, FS * 4, GS * 4
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171 #define REGSIZE 4
172
173 #endif
174
175 #ifdef __x86_64__
176
177 /* Returns true if the current inferior belongs to an x86-64 process,
178 per the tdesc. */
179
180 static int
181 is_64bit_tdesc (void)
182 {
183 struct regcache *regcache = get_thread_regcache (current_thread, 0);
184
185 return register_size (regcache->tdesc, 0) == 8;
186 }
187
188 #endif
189
190 \f
191 /* Called by libthread_db. */
192
193 ps_err_e
194 ps_get_thread_area (struct ps_prochandle *ph,
195 lwpid_t lwpid, int idx, void **base)
196 {
197 #ifdef __x86_64__
198 int use_64bit = is_64bit_tdesc ();
199
200 if (use_64bit)
201 {
202 switch (idx)
203 {
204 case FS:
205 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
206 return PS_OK;
207 break;
208 case GS:
209 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
210 return PS_OK;
211 break;
212 default:
213 return PS_BADADDR;
214 }
215 return PS_ERR;
216 }
217 #endif
218
219 {
220 unsigned int desc[4];
221
222 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
223 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
224 return PS_ERR;
225
226 /* Ensure we properly extend the value to 64-bits for x86_64. */
227 *base = (void *) (uintptr_t) desc[1];
228 return PS_OK;
229 }
230 }
231
232 /* Get the thread area address. This is used to recognize which
233 thread is which when tracing with the in-process agent library. We
234 don't read anything from the address, and treat it as opaque; it's
235 the address itself that we assume is unique per-thread. */
236
237 static int
238 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
239 {
240 #ifdef __x86_64__
241 int use_64bit = is_64bit_tdesc ();
242
243 if (use_64bit)
244 {
245 void *base;
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
247 {
248 *addr = (CORE_ADDR) (uintptr_t) base;
249 return 0;
250 }
251
252 return -1;
253 }
254 #endif
255
256 {
257 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
258 struct thread_info *thr = get_lwp_thread (lwp);
259 struct regcache *regcache = get_thread_regcache (thr, 1);
260 unsigned int desc[4];
261 ULONGEST gs = 0;
262 const int reg_thread_area = 3; /* Shift out the selector's RPL/TI bits to get the GDT entry index. */
263 int idx;
264
265 collect_register_by_name (regcache, "gs", &gs);
266
267 idx = gs >> reg_thread_area;
268
269 if (ptrace (PTRACE_GET_THREAD_AREA,
270 lwpid_of (thr),
271 (void *) (long) idx, (unsigned long) &desc) < 0)
272 return -1;
273
274 *addr = desc[1];
275 return 0;
276 }
277 }
278
279
280 \f
281 static int
282 x86_cannot_store_register (int regno)
283 {
284 #ifdef __x86_64__
285 if (is_64bit_tdesc ())
286 return 0;
287 #endif
288
289 return regno >= I386_NUM_REGS;
290 }
291
292 static int
293 x86_cannot_fetch_register (int regno)
294 {
295 #ifdef __x86_64__
296 if (is_64bit_tdesc ())
297 return 0;
298 #endif
299
300 return regno >= I386_NUM_REGS;
301 }
302
303 static void
304 x86_fill_gregset (struct regcache *regcache, void *buf)
305 {
306 int i;
307
308 #ifdef __x86_64__
309 if (register_size (regcache->tdesc, 0) == 8)
310 {
311 for (i = 0; i < X86_64_NUM_REGS; i++)
312 if (x86_64_regmap[i] != -1)
313 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
314
315 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
316 {
317 unsigned long base;
318 int lwpid = lwpid_of (current_thread);
319
320 collect_register_by_name (regcache, "fs_base", &base);
321 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
322
323 collect_register_by_name (regcache, "gs_base", &base);
324 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
325 }
326 #endif
327
328 return;
329 }
330
331 /* 32-bit inferior registers need to be zero-extended.
332 Callers would read uninitialized memory otherwise. */
333 memset (buf, 0x00, X86_64_USER_REGS * 8);
334 #endif
335
336 for (i = 0; i < I386_NUM_REGS; i++)
337 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
338
339 collect_register_by_name (regcache, "orig_eax",
340 ((char *) buf) + ORIG_EAX * REGSIZE);
341 }
342
343 static void
344 x86_store_gregset (struct regcache *regcache, const void *buf)
345 {
346 int i;
347
348 #ifdef __x86_64__
349 if (register_size (regcache->tdesc, 0) == 8)
350 {
351 for (i = 0; i < X86_64_NUM_REGS; i++)
352 if (x86_64_regmap[i] != -1)
353 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
354
355 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
356 {
357 unsigned long base;
358 int lwpid = lwpid_of (current_thread);
359
360 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
361 supply_register_by_name (regcache, "fs_base", &base);
362
363 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
364 supply_register_by_name (regcache, "gs_base", &base);
365 }
366 #endif
367 return;
368 }
369 #endif
370
371 for (i = 0; i < I386_NUM_REGS; i++)
372 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
373
374 supply_register_by_name (regcache, "orig_eax",
375 ((char *) buf) + ORIG_EAX * REGSIZE);
376 }
377
378 static void
379 x86_fill_fpregset (struct regcache *regcache, void *buf)
380 {
381 #ifdef __x86_64__
382 i387_cache_to_fxsave (regcache, buf);
383 #else
384 i387_cache_to_fsave (regcache, buf);
385 #endif
386 }
387
388 static void
389 x86_store_fpregset (struct regcache *regcache, const void *buf)
390 {
391 #ifdef __x86_64__
392 i387_fxsave_to_cache (regcache, buf);
393 #else
394 i387_fsave_to_cache (regcache, buf);
395 #endif
396 }
397
398 #ifndef __x86_64__
399
400 static void
401 x86_fill_fpxregset (struct regcache *regcache, void *buf)
402 {
403 i387_cache_to_fxsave (regcache, buf);
404 }
405
406 static void
407 x86_store_fpxregset (struct regcache *regcache, const void *buf)
408 {
409 i387_fxsave_to_cache (regcache, buf);
410 }
411
412 #endif
413
414 static void
415 x86_fill_xstateregset (struct regcache *regcache, void *buf)
416 {
417 i387_cache_to_xsave (regcache, buf);
418 }
419
420 static void
421 x86_store_xstateregset (struct regcache *regcache, const void *buf)
422 {
423 i387_xsave_to_cache (regcache, buf);
424 }
425
426 /* ??? The non-biarch i386 case stores all the i387 regs twice.
427 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
428 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
429 doesn't work. It would be nice to avoid the duplication in the case where
430 it does work. Maybe the arch_setup routine could check whether it works
431 and update the supported regsets accordingly. */
432
433 static struct regset_info x86_regsets[] =
434 {
435 #ifdef HAVE_PTRACE_GETREGS
436 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
437 GENERAL_REGS,
438 x86_fill_gregset, x86_store_gregset },
439 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
440 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
441 # ifndef __x86_64__
442 # ifdef HAVE_PTRACE_GETFPXREGS
443 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
444 EXTENDED_REGS,
445 x86_fill_fpxregset, x86_store_fpxregset },
446 # endif
447 # endif
448 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
449 FP_REGS,
450 x86_fill_fpregset, x86_store_fpregset },
451 #endif /* HAVE_PTRACE_GETREGS */
452 NULL_REGSET
453 };
454
455 static CORE_ADDR
456 x86_get_pc (struct regcache *regcache)
457 {
458 int use_64bit = register_size (regcache->tdesc, 0) == 8;
459
460 if (use_64bit)
461 {
462 uint64_t pc;
463
464 collect_register_by_name (regcache, "rip", &pc);
465 return (CORE_ADDR) pc;
466 }
467 else
468 {
469 uint32_t pc;
470
471 collect_register_by_name (regcache, "eip", &pc);
472 return (CORE_ADDR) pc;
473 }
474 }
475
476 static void
477 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
478 {
479 int use_64bit = register_size (regcache->tdesc, 0) == 8;
480
481 if (use_64bit)
482 {
483 uint64_t newpc = pc;
484
485 supply_register_by_name (regcache, "rip", &newpc);
486 }
487 else
488 {
489 uint32_t newpc = pc;
490
491 supply_register_by_name (regcache, "eip", &newpc);
492 }
493 }
494 \f
495 static const gdb_byte x86_breakpoint[] = { 0xCC };
496 #define x86_breakpoint_len 1
497
498 static int
499 x86_breakpoint_at (CORE_ADDR pc)
500 {
501 unsigned char c;
502
503 (*the_target->read_memory) (pc, &c, 1);
504 if (c == 0xCC)
505 return 1;
506
507 return 0;
508 }
509 \f
510 /* Low-level function vector. */
511 struct x86_dr_low_type x86_dr_low =
512 {
513 x86_linux_dr_set_control,
514 x86_linux_dr_set_addr,
515 x86_linux_dr_get_addr,
516 x86_linux_dr_get_status,
517 x86_linux_dr_get_control,
518 sizeof (void *),
519 };
520 \f
521 /* Breakpoint/Watchpoint support. */
522
523 static int
524 x86_supports_z_point_type (char z_type)
525 {
526 switch (z_type)
527 {
528 case Z_PACKET_SW_BP:
529 case Z_PACKET_HW_BP:
530 case Z_PACKET_WRITE_WP:
531 case Z_PACKET_ACCESS_WP:
532 return 1;
533 default:
534 return 0;
535 }
536 }
537
538 static int
539 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
540 int size, struct raw_breakpoint *bp)
541 {
542 struct process_info *proc = current_process ();
543
544 switch (type)
545 {
546 case raw_bkpt_type_hw:
547 case raw_bkpt_type_write_wp:
548 case raw_bkpt_type_access_wp:
549 {
550 enum target_hw_bp_type hw_type
551 = raw_bkpt_type_to_target_hw_bp_type (type);
552 struct x86_debug_reg_state *state
553 = &proc->priv->arch_private->debug_reg_state;
554
555 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
556 }
557
558 default:
559 /* Unsupported. */
560 return 1;
561 }
562 }
563
564 static int
565 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
566 int size, struct raw_breakpoint *bp)
567 {
568 struct process_info *proc = current_process ();
569
570 switch (type)
571 {
572 case raw_bkpt_type_hw:
573 case raw_bkpt_type_write_wp:
574 case raw_bkpt_type_access_wp:
575 {
576 enum target_hw_bp_type hw_type
577 = raw_bkpt_type_to_target_hw_bp_type (type);
578 struct x86_debug_reg_state *state
579 = &proc->priv->arch_private->debug_reg_state;
580
581 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
582 }
583 default:
584 /* Unsupported. */
585 return 1;
586 }
587 }
588
589 static int
590 x86_stopped_by_watchpoint (void)
591 {
592 struct process_info *proc = current_process ();
593 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
594 }
595
596 static CORE_ADDR
597 x86_stopped_data_address (void)
598 {
599 struct process_info *proc = current_process ();
600 CORE_ADDR addr;
601 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
602 &addr))
603 return addr;
604 return 0;
605 }
606 \f
607 /* Called when a new process is created. */
608
609 static struct arch_process_info *
610 x86_linux_new_process (void)
611 {
612 struct arch_process_info *info = XCNEW (struct arch_process_info);
613
614 x86_low_init_dregs (&info->debug_reg_state);
615
616 return info;
617 }
618
619 /* Target routine for linux_new_fork. */
620
621 static void
622 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
623 {
624 /* These are allocated by linux_add_process. */
625 gdb_assert (parent->priv != NULL
626 && parent->priv->arch_private != NULL);
627 gdb_assert (child->priv != NULL
628 && child->priv->arch_private != NULL);
629
630 /* Linux kernel before 2.6.33 commit
631 72f674d203cd230426437cdcf7dd6f681dad8b0d
632 will inherit hardware debug registers from parent
633 on fork/vfork/clone. Newer Linux kernels create such tasks with
634 zeroed debug registers.
635
636 GDB core assumes the child inherits the watchpoints/hw
637 breakpoints of the parent, and will remove them all from the
638 forked off process. Copy the debug registers mirrors into the
639 new process so that all breakpoints and watchpoints can be
640 removed together. The debug registers mirror will become zeroed
641 in the end before detaching the forked off process, thus making
642 this compatible with older Linux kernels too. */
643
644 *child->priv->arch_private = *parent->priv->arch_private;
645 }
646
647 /* See nat/x86-dregs.h. */
648
649 struct x86_debug_reg_state *
650 x86_debug_reg_state (pid_t pid)
651 {
652 struct process_info *proc = find_process_pid (pid);
653
654 return &proc->priv->arch_private->debug_reg_state;
655 }
656 \f
657 /* When GDBSERVER is built as a 64-bit application on linux, the
658 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
659 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
660 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
661 conversion in-place ourselves. */
662
663 /* Convert a ptrace/host siginfo object into/from the siginfo in the
664 layout of the inferior's architecture. Returns true if any
665 conversion was done; false otherwise. If DIRECTION is 1, then copy
666 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
667 INF. */
668
669 static int
670 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
671 {
672 #ifdef __x86_64__
673 unsigned int machine;
674 int tid = lwpid_of (current_thread);
675 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
676
677 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
678 if (!is_64bit_tdesc ())
679 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
680 FIXUP_32);
681 /* No fixup for native x32 GDB. */
682 else if (!is_elf64 && sizeof (void *) == 8)
683 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
684 FIXUP_X32);
685 #endif
686
687 return 0;
688 }
689 \f
690 static int use_xml;
691
692 /* Format of XSAVE extended state is:
693 struct
694 {
695 fxsave_bytes[0..463]
696 sw_usable_bytes[464..511]
697 xstate_hdr_bytes[512..575]
698 avx_bytes[576..831]
699 future_state etc
700 };
701
702 Same memory layout will be used for the coredump NT_X86_XSTATE
703 representing the XSAVE extended state registers.
704
705 The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
706 extended state mask, which is the same as the extended control register
707 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
708 together with the mask saved in the xstate_hdr_bytes to determine what
709 states the processor/OS supports and what state, used or initialized,
710 the process/thread is in. */
711 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
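/* For example, once the XSAVE block has been fetched with
   PTRACE_GETREGSET and NT_X86_XSTATE, XCR0 can be read out as in this
   sketch (x86_linux_read_description below does the equivalent by
   indexing a uint64_t array):

     uint64_t xcr0;
     memcpy (&xcr0, (char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));  */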
712
713 /* Does the current host support the GETFPXREGS request? The header
714 file may or may not define it, and even if it is defined, the
715 kernel will return EIO if it's running on a pre-SSE processor. */
716 int have_ptrace_getfpxregs =
717 #ifdef HAVE_PTRACE_GETFPXREGS
718 -1
719 #else
720 0
721 #endif
722 ;
723
724 /* Get Linux/x86 target description from running target. */
725
726 static const struct target_desc *
727 x86_linux_read_description (void)
728 {
729 unsigned int machine;
730 int is_elf64;
731 int xcr0_features;
732 int tid;
733 static uint64_t xcr0;
734 struct regset_info *regset;
735
736 tid = lwpid_of (current_thread);
737
738 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
739
740 if (sizeof (void *) == 4)
741 {
742 if (is_elf64 > 0)
743 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
744 #ifndef __x86_64__
745 else if (machine == EM_X86_64)
746 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
747 #endif
748 }
749
750 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
751 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
752 {
753 elf_fpxregset_t fpxregs;
754
755 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
756 {
757 have_ptrace_getfpxregs = 0;
758 have_ptrace_getregset = 0;
759 return tdesc_i386_mmx_linux;
760 }
761 else
762 have_ptrace_getfpxregs = 1;
763 }
764 #endif
765
766 if (!use_xml)
767 {
768 x86_xcr0 = X86_XSTATE_SSE_MASK;
769
770 /* Don't use XML. */
771 #ifdef __x86_64__
772 if (machine == EM_X86_64)
773 return tdesc_amd64_linux_no_xml;
774 else
775 #endif
776 return tdesc_i386_linux_no_xml;
777 }
778
779 if (have_ptrace_getregset == -1)
780 {
781 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
782 struct iovec iov;
783
784 iov.iov_base = xstateregs;
785 iov.iov_len = sizeof (xstateregs);
786
787 /* Check if PTRACE_GETREGSET works. */
788 if (ptrace (PTRACE_GETREGSET, tid,
789 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
790 have_ptrace_getregset = 0;
791 else
792 {
793 have_ptrace_getregset = 1;
794
795 /* Get XCR0 from XSAVE extended state. */
796 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
797 / sizeof (uint64_t))];
798
799 /* Use PTRACE_GETREGSET if it is available. */
800 for (regset = x86_regsets;
801 regset->fill_function != NULL; regset++)
802 if (regset->get_request == PTRACE_GETREGSET)
803 regset->size = X86_XSTATE_SIZE (xcr0);
804 else if (regset->type != GENERAL_REGS)
805 regset->size = 0;
806 }
807 }
808
809 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
810 xcr0_features = (have_ptrace_getregset
811 && (xcr0 & X86_XSTATE_ALL_MASK));
812
813 if (xcr0_features)
814 x86_xcr0 = xcr0;
815
816 if (machine == EM_X86_64)
817 {
818 #ifdef __x86_64__
819 if (is_elf64)
820 {
821 if (xcr0_features)
822 {
823 switch (xcr0 & X86_XSTATE_ALL_MASK)
824 {
825 case X86_XSTATE_AVX512_MASK:
826 return tdesc_amd64_avx512_linux;
827
828 case X86_XSTATE_AVX_MPX_MASK:
829 return tdesc_amd64_avx_mpx_linux;
830
831 case X86_XSTATE_MPX_MASK:
832 return tdesc_amd64_mpx_linux;
833
834 case X86_XSTATE_AVX_MASK:
835 return tdesc_amd64_avx_linux;
836
837 default:
838 return tdesc_amd64_linux;
839 }
840 }
841 else
842 return tdesc_amd64_linux;
843 }
844 else
845 {
846 if (xcr0_features)
847 {
848 switch (xcr0 & X86_XSTATE_ALL_MASK)
849 {
850 case X86_XSTATE_AVX512_MASK:
851 return tdesc_x32_avx512_linux;
852
853 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
854 case X86_XSTATE_AVX_MASK:
855 return tdesc_x32_avx_linux;
856
857 default:
858 return tdesc_x32_linux;
859 }
860 }
861 else
862 return tdesc_x32_linux;
863 }
864 #endif
865 }
866 else
867 {
868 if (xcr0_features)
869 {
870 switch (xcr0 & X86_XSTATE_ALL_MASK)
871 {
872 case (X86_XSTATE_AVX512_MASK):
873 return tdesc_i386_avx512_linux;
874
875 case (X86_XSTATE_MPX_MASK):
876 return tdesc_i386_mpx_linux;
877
878 case (X86_XSTATE_AVX_MPX_MASK):
879 return tdesc_i386_avx_mpx_linux;
880
881 case (X86_XSTATE_AVX_MASK):
882 return tdesc_i386_avx_linux;
883
884 default:
885 return tdesc_i386_linux;
886 }
887 }
888 else
889 return tdesc_i386_linux;
890 }
891
892 gdb_assert_not_reached ("failed to return tdesc");
893 }
894
895 /* Callback for find_inferior. Stops iteration when a thread with a
896 given PID is found. */
897
898 static int
899 same_process_callback (struct inferior_list_entry *entry, void *data)
900 {
901 int pid = *(int *) data;
902
903 return (ptid_get_pid (entry->id) == pid);
904 }
905
906 /* Callback for for_each_inferior. Calls the arch_setup routine for
907 each process. */
908
909 static void
910 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
911 {
912 int pid = ptid_get_pid (entry->id);
913
914 /* Look up any thread of this process. */
915 current_thread
916 = (struct thread_info *) find_inferior (&all_threads,
917 same_process_callback, &pid);
918
919 the_low_target.arch_setup ();
920 }
921
922 /* Update the target descriptions of all processes; a new GDB has
923 connected, and it may or may not support XML target descriptions. */
924
925 static void
926 x86_linux_update_xmltarget (void)
927 {
928 struct thread_info *saved_thread = current_thread;
929
930 /* Before changing the register cache's internal layout, flush the
931 contents of the current valid caches back to the threads, and
932 release the current regcache objects. */
933 regcache_release ();
934
935 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
936
937 current_thread = saved_thread;
938 }
939
940 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
941 PTRACE_GETREGSET. */
942
943 static void
944 x86_linux_process_qsupported (char **features, int count)
945 {
946 int i;
947
948 /* Assume gdb doesn't support XML unless it tells us otherwise: if gdb
949 sends "xmlRegisters=" with "i386" in its qSupported query, it supports
950 x86 XML target descriptions. */
951 use_xml = 0;
952 for (i = 0; i < count; i++)
953 {
954 const char *feature = features[i];
955
956 if (startswith (feature, "xmlRegisters="))
957 {
958 char *copy = xstrdup (feature + 13);
959 char *p;
960
961 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
962 {
963 if (strcmp (p, "i386") == 0)
964 {
965 use_xml = 1;
966 break;
967 }
968 }
969
970 free (copy);
971 }
972 }
973 x86_linux_update_xmltarget ();
974 }
975
976 /* Common for x86/x86-64. */
977
978 static struct regsets_info x86_regsets_info =
979 {
980 x86_regsets, /* regsets */
981 0, /* num_regsets */
982 NULL, /* disabled_regsets */
983 };
984
985 #ifdef __x86_64__
986 static struct regs_info amd64_linux_regs_info =
987 {
988 NULL, /* regset_bitmap */
989 NULL, /* usrregs_info */
990 &x86_regsets_info
991 };
992 #endif
993 static struct usrregs_info i386_linux_usrregs_info =
994 {
995 I386_NUM_REGS,
996 i386_regmap,
997 };
998
999 static struct regs_info i386_linux_regs_info =
1000 {
1001 NULL, /* regset_bitmap */
1002 &i386_linux_usrregs_info,
1003 &x86_regsets_info
1004 };
1005
1006 const struct regs_info *
1007 x86_linux_regs_info (void)
1008 {
1009 #ifdef __x86_64__
1010 if (is_64bit_tdesc ())
1011 return &amd64_linux_regs_info;
1012 else
1013 #endif
1014 return &i386_linux_regs_info;
1015 }
1016
1017 /* Initialize the target description for the architecture of the
1018 inferior. */
1019
1020 static void
1021 x86_arch_setup (void)
1022 {
1023 current_process ()->tdesc = x86_linux_read_description ();
1024 }
1025
1026 /* Fill *SYSNO with the number of the syscall that was trapped. This
1027 should only be called if LWP got a SYSCALL_SIGTRAP. */
1028
1029 static void
1030 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1031 {
1032 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1033
1034 if (use_64bit)
1035 {
1036 long l_sysno;
1037
1038 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1039 *sysno = (int) l_sysno;
1040 }
1041 else
1042 collect_register_by_name (regcache, "orig_eax", sysno);
1043 }
1044
1045 static int
1046 x86_supports_tracepoints (void)
1047 {
1048 return 1;
1049 }
1050
1051 static void
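/* Copy the LEN bytes at BUF into the inferior at address *TO, and
   advance *TO past the bytes just written.  */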
1052 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1053 {
1054 write_inferior_memory (*to, buf, len);
1055 *to += len;
1056 }
1057
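/* Append to BUF the instruction bytes given in OP as a string of
   whitespace-separated hex byte values, e.g. "48 89 e6" for
   "mov %rsp,%rsi".  Returns the number of bytes written.  */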
1058 static int
1059 push_opcode (unsigned char *buf, char *op)
1060 {
1061 unsigned char *buf_org = buf;
1062
1063 while (1)
1064 {
1065 char *endptr;
1066 unsigned long ul = strtoul (op, &endptr, 16);
1067
1068 if (endptr == op)
1069 break;
1070
1071 *buf++ = ul;
1072 op = endptr;
1073 }
1074
1075 return buf - buf_org;
1076 }
1077
1078 #ifdef __x86_64__
1079
1080 /* Build a jump pad that saves registers and calls a collection
1081 function. Writes the jump instruction that jumps to the jump pad
1082 into JJUMPAD_INSN. The caller is responsible for writing it in at
1083 the tracepoint address. */
1084
1085 static int
1086 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1087 CORE_ADDR collector,
1088 CORE_ADDR lockaddr,
1089 ULONGEST orig_size,
1090 CORE_ADDR *jump_entry,
1091 CORE_ADDR *trampoline,
1092 ULONGEST *trampoline_size,
1093 unsigned char *jjump_pad_insn,
1094 ULONGEST *jjump_pad_insn_size,
1095 CORE_ADDR *adjusted_insn_addr,
1096 CORE_ADDR *adjusted_insn_addr_end,
1097 char *err)
1098 {
1099 unsigned char buf[40];
1100 int i, offset;
1101 int64_t loffset;
1102
1103 CORE_ADDR buildaddr = *jump_entry;
1104
1105 /* Build the jump pad. */
1106
1107 /* First, do tracepoint data collection. Save registers. */
1108 i = 0;
1109 /* Need to ensure stack pointer saved first. */
1110 buf[i++] = 0x54; /* push %rsp */
1111 buf[i++] = 0x55; /* push %rbp */
1112 buf[i++] = 0x57; /* push %rdi */
1113 buf[i++] = 0x56; /* push %rsi */
1114 buf[i++] = 0x52; /* push %rdx */
1115 buf[i++] = 0x51; /* push %rcx */
1116 buf[i++] = 0x53; /* push %rbx */
1117 buf[i++] = 0x50; /* push %rax */
1118 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1119 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1120 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1121 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1122 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1123 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1124 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1125 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1126 buf[i++] = 0x9c; /* pushfq */
1127 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1128 buf[i++] = 0xbf;
1129 memcpy (buf + i, &tpaddr, 8);
1130 i += 8;
1131 buf[i++] = 0x57; /* push %rdi */
1132 append_insns (&buildaddr, i, buf);
1133
1134 /* Stack space for the collecting_t object. */
1135 i = 0;
1136 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1137 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1138 memcpy (buf + i, &tpoint, 8);
1139 i += 8;
1140 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1141 i += push_opcode (&buf[i],
1142 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1143 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1144 append_insns (&buildaddr, i, buf);
1145
1146 /* spin-lock. */
1147 i = 0;
1148 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1149 memcpy (&buf[i], (void *) &lockaddr, 8);
1150 i += 8;
1151 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1152 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1153 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1154 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1155 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1156 append_insns (&buildaddr, i, buf);
1157
1158 /* Set up the gdb_collect call. */
1159 /* At this point, (stack pointer + 0x18) is the base of our saved
1160 register block. */
1161
1162 i = 0;
1163 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1164 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1165
1166 /* tpoint address may be 64-bit wide. */
1167 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1168 memcpy (buf + i, &tpoint, 8);
1169 i += 8;
1170 append_insns (&buildaddr, i, buf);
1171
1172 /* The collector function, living in the shared library, may be
1173 more than 31 bits away from the jump pad. */
1174 i = 0;
1175 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1176 memcpy (buf + i, &collector, 8);
1177 i += 8;
1178 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1179 append_insns (&buildaddr, i, buf);
1180
1181 /* Clear the spin-lock. */
1182 i = 0;
1183 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1184 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1185 memcpy (buf + i, &lockaddr, 8);
1186 i += 8;
1187 append_insns (&buildaddr, i, buf);
1188
1189 /* Remove the stack space that had been used for the collecting_t object. */
1190 i = 0;
1191 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1192 append_insns (&buildaddr, i, buf);
1193
1194 /* Restore register state. */
1195 i = 0;
1196 buf[i++] = 0x48; /* add $0x8,%rsp */
1197 buf[i++] = 0x83;
1198 buf[i++] = 0xc4;
1199 buf[i++] = 0x08;
1200 buf[i++] = 0x9d; /* popfq */
1201 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1202 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1203 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1204 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1205 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1206 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1207 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1208 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1209 buf[i++] = 0x58; /* pop %rax */
1210 buf[i++] = 0x5b; /* pop %rbx */
1211 buf[i++] = 0x59; /* pop %rcx */
1212 buf[i++] = 0x5a; /* pop %rdx */
1213 buf[i++] = 0x5e; /* pop %rsi */
1214 buf[i++] = 0x5f; /* pop %rdi */
1215 buf[i++] = 0x5d; /* pop %rbp */
1216 buf[i++] = 0x5c; /* pop %rsp */
1217 append_insns (&buildaddr, i, buf);
1218
1219 /* Now, adjust the original instruction to execute in the jump
1220 pad. */
1221 *adjusted_insn_addr = buildaddr;
1222 relocate_instruction (&buildaddr, tpaddr);
1223 *adjusted_insn_addr_end = buildaddr;
1224
1225 /* Finally, write a jump back to the program. */
1226
1227 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1228 if (loffset > INT_MAX || loffset < INT_MIN)
1229 {
1230 sprintf (err,
1231 "E.Jump back from jump pad too far from tracepoint "
1232 "(offset 0x%" PRIx64 " > int32).", loffset);
1233 return 1;
1234 }
1235
1236 offset = (int) loffset;
1237 memcpy (buf, jump_insn, sizeof (jump_insn));
1238 memcpy (buf + 1, &offset, 4);
1239 append_insns (&buildaddr, sizeof (jump_insn), buf);
1240
1241 /* The jump pad is now built. Wire in a jump to our jump pad. This
1242 is always done last (by our caller actually), so that we can
1243 install fast tracepoints with threads running. This relies on
1244 the agent's atomic write support. */
1245 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1246 if (loffset > INT_MAX || loffset < INT_MIN)
1247 {
1248 sprintf (err,
1249 "E.Jump pad too far from tracepoint "
1250 "(offset 0x%" PRIx64 " > int32).", loffset);
1251 return 1;
1252 }
1253
1254 offset = (int) loffset;
1255
1256 memcpy (buf, jump_insn, sizeof (jump_insn));
1257 memcpy (buf + 1, &offset, 4);
1258 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1259 *jjump_pad_insn_size = sizeof (jump_insn);
1260
1261 /* Return the end address of our pad. */
1262 *jump_entry = buildaddr;
1263
1264 return 0;
1265 }
1266
1267 #endif /* __x86_64__ */
1268
1269 /* Build a jump pad that saves registers and calls a collection
1270 function. Writes the jump instruction that jumps to the jump pad
1271 into JJUMPAD_INSN. The caller is responsible for writing it in at
1272 the tracepoint address. */
1273
1274 static int
1275 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1276 CORE_ADDR collector,
1277 CORE_ADDR lockaddr,
1278 ULONGEST orig_size,
1279 CORE_ADDR *jump_entry,
1280 CORE_ADDR *trampoline,
1281 ULONGEST *trampoline_size,
1282 unsigned char *jjump_pad_insn,
1283 ULONGEST *jjump_pad_insn_size,
1284 CORE_ADDR *adjusted_insn_addr,
1285 CORE_ADDR *adjusted_insn_addr_end,
1286 char *err)
1287 {
1288 unsigned char buf[0x100];
1289 int i, offset;
1290 CORE_ADDR buildaddr = *jump_entry;
1291
1292 /* Build the jump pad. */
1293
1294 /* First, do tracepoint data collection. Save registers. */
1295 i = 0;
1296 buf[i++] = 0x60; /* pushad */
1297 buf[i++] = 0x68; /* push tpaddr aka $pc */
1298 *((int *)(buf + i)) = (int) tpaddr;
1299 i += 4;
1300 buf[i++] = 0x9c; /* pushf */
1301 buf[i++] = 0x1e; /* push %ds */
1302 buf[i++] = 0x06; /* push %es */
1303 buf[i++] = 0x0f; /* push %fs */
1304 buf[i++] = 0xa0;
1305 buf[i++] = 0x0f; /* push %gs */
1306 buf[i++] = 0xa8;
1307 buf[i++] = 0x16; /* push %ss */
1308 buf[i++] = 0x0e; /* push %cs */
1309 append_insns (&buildaddr, i, buf);
1310
1311 /* Stack space for the collecting_t object. */
1312 i = 0;
1313 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1314
1315 /* Build the object. */
1316 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1317 memcpy (buf + i, &tpoint, 4);
1318 i += 4;
1319 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1320
1321 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1322 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1323 append_insns (&buildaddr, i, buf);
1324
1325 /* Spin-lock. Note this uses cmpxchg, which is not available on the
1326 original i386. If we cared about that, this could use xchg instead. */
1327
1328 i = 0;
1329 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1330 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1331 %esp,<lockaddr> */
1332 memcpy (&buf[i], (void *) &lockaddr, 4);
1333 i += 4;
1334 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1335 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1336 append_insns (&buildaddr, i, buf);
1337
1338
1339 /* Set up arguments to the gdb_collect call. */
1340 i = 0;
1341 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1342 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1343 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1344 append_insns (&buildaddr, i, buf);
1345
1346 i = 0;
1347 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1348 append_insns (&buildaddr, i, buf);
1349
1350 i = 0;
1351 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1352 memcpy (&buf[i], (void *) &tpoint, 4);
1353 i += 4;
1354 append_insns (&buildaddr, i, buf);
1355
1356 buf[0] = 0xe8; /* call <reladdr> */
1357 offset = collector - (buildaddr + sizeof (jump_insn));
1358 memcpy (buf + 1, &offset, 4);
1359 append_insns (&buildaddr, 5, buf);
1360 /* Clean up after the call. */
1361 buf[0] = 0x83; /* add $0x8,%esp */
1362 buf[1] = 0xc4;
1363 buf[2] = 0x08;
1364 append_insns (&buildaddr, 3, buf);
1365
1366
1367 /* Clear the spin-lock. This would need the LOCK prefix on older
1368 broken archs. */
1369 i = 0;
1370 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1371 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1372 memcpy (buf + i, &lockaddr, 4);
1373 i += 4;
1374 append_insns (&buildaddr, i, buf);
1375
1376
1377 /* Remove the stack space that had been used for the collecting_t object. */
1378 i = 0;
1379 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1380 append_insns (&buildaddr, i, buf);
1381
1382 i = 0;
1383 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1384 buf[i++] = 0xc4;
1385 buf[i++] = 0x04;
1386 buf[i++] = 0x17; /* pop %ss */
1387 buf[i++] = 0x0f; /* pop %gs */
1388 buf[i++] = 0xa9;
1389 buf[i++] = 0x0f; /* pop %fs */
1390 buf[i++] = 0xa1;
1391 buf[i++] = 0x07; /* pop %es */
1392 buf[i++] = 0x1f; /* pop %ds */
1393 buf[i++] = 0x9d; /* popf */
1394 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1395 buf[i++] = 0xc4;
1396 buf[i++] = 0x04;
1397 buf[i++] = 0x61; /* popad */
1398 append_insns (&buildaddr, i, buf);
1399
1400 /* Now, adjust the original instruction to execute in the jump
1401 pad. */
1402 *adjusted_insn_addr = buildaddr;
1403 relocate_instruction (&buildaddr, tpaddr);
1404 *adjusted_insn_addr_end = buildaddr;
1405
1406 /* Write the jump back to the program. */
1407 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1408 memcpy (buf, jump_insn, sizeof (jump_insn));
1409 memcpy (buf + 1, &offset, 4);
1410 append_insns (&buildaddr, sizeof (jump_insn), buf);
1411
1412 /* The jump pad is now built. Wire in a jump to our jump pad. This
1413 is always done last (by our caller actually), so that we can
1414 install fast tracepoints with threads running. This relies on
1415 the agent's atomic write support. */
1416 if (orig_size == 4)
1417 {
1418 /* Create a trampoline. */
1419 *trampoline_size = sizeof (jump_insn);
1420 if (!claim_trampoline_space (*trampoline_size, trampoline))
1421 {
1422 /* No trampoline space available. */
1423 strcpy (err,
1424 "E.Cannot allocate trampoline space needed for fast "
1425 "tracepoints on 4-byte instructions.");
1426 return 1;
1427 }
1428
1429 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1430 memcpy (buf, jump_insn, sizeof (jump_insn));
1431 memcpy (buf + 1, &offset, 4);
1432 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1433
1434 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1435 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1436 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1437 memcpy (buf + 2, &offset, 2);
1438 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1439 *jjump_pad_insn_size = sizeof (small_jump_insn);
1440 }
1441 else
1442 {
1443 /* Else use a 32-bit relative jump instruction. */
1444 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1445 memcpy (buf, jump_insn, sizeof (jump_insn));
1446 memcpy (buf + 1, &offset, 4);
1447 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1448 *jjump_pad_insn_size = sizeof (jump_insn);
1449 }
1450
1451 /* Return the end address of our pad. */
1452 *jump_entry = buildaddr;
1453
1454 return 0;
1455 }
1456
1457 static int
1458 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1459 CORE_ADDR collector,
1460 CORE_ADDR lockaddr,
1461 ULONGEST orig_size,
1462 CORE_ADDR *jump_entry,
1463 CORE_ADDR *trampoline,
1464 ULONGEST *trampoline_size,
1465 unsigned char *jjump_pad_insn,
1466 ULONGEST *jjump_pad_insn_size,
1467 CORE_ADDR *adjusted_insn_addr,
1468 CORE_ADDR *adjusted_insn_addr_end,
1469 char *err)
1470 {
1471 #ifdef __x86_64__
1472 if (is_64bit_tdesc ())
1473 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1474 collector, lockaddr,
1475 orig_size, jump_entry,
1476 trampoline, trampoline_size,
1477 jjump_pad_insn,
1478 jjump_pad_insn_size,
1479 adjusted_insn_addr,
1480 adjusted_insn_addr_end,
1481 err);
1482 #endif
1483
1484 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1485 collector, lockaddr,
1486 orig_size, jump_entry,
1487 trampoline, trampoline_size,
1488 jjump_pad_insn,
1489 jjump_pad_insn_size,
1490 adjusted_insn_addr,
1491 adjusted_insn_addr_end,
1492 err);
1493 }
1494
1495 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1496 architectures. */
1497
1498 static int
1499 x86_get_min_fast_tracepoint_insn_len (void)
1500 {
1501 static int warned_about_fast_tracepoints = 0;
1502
1503 #ifdef __x86_64__
1504 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1505 used for fast tracepoints. */
1506 if (is_64bit_tdesc ())
1507 return 5;
1508 #endif
1509
1510 if (agent_loaded_p ())
1511 {
1512 char errbuf[IPA_BUFSIZ];
1513
1514 errbuf[0] = '\0';
1515
1516 /* On x86, if trampolines are available, then 4-byte jump instructions
1517 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1518 with a 4-byte offset are used instead. */
1519 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1520 return 4;
1521 else
1522 {
1523 /* GDB has no channel to explain to the user why a shorter fast
1524 tracepoint is not possible, but at least make GDBserver
1525 mention that something has gone awry. */
1526 if (!warned_about_fast_tracepoints)
1527 {
1528 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1529 warned_about_fast_tracepoints = 1;
1530 }
1531 return 5;
1532 }
1533 }
1534 else
1535 {
1536 /* Indicate that the minimum length is currently unknown since the IPA
1537 has not loaded yet. */
1538 return 0;
1539 }
1540 }
1541
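/* Append the LEN bytes at START to the code being compiled at
   current_insn_ptr, and advance current_insn_ptr past them.  */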
1542 static void
1543 add_insns (unsigned char *start, int len)
1544 {
1545 CORE_ADDR buildaddr = current_insn_ptr;
1546
1547 if (debug_threads)
1548 debug_printf ("Adding %d bytes of insn at %s\n",
1549 len, paddress (buildaddr));
1550
1551 append_insns (&buildaddr, len, start);
1552 current_insn_ptr = buildaddr;
1553 }
1554
1555 /* Our general strategy for emitting code is to avoid specifying raw
1556 bytes whenever possible, and instead copy a block of inline asm
1557 that is embedded in the function. This is a little messy, because
1558 we need to keep the compiler from discarding what looks like dead
1559 code, plus suppress various warnings. */
1560
1561 #define EMIT_ASM(NAME, INSNS) \
1562 do \
1563 { \
1564 extern unsigned char start_ ## NAME, end_ ## NAME; \
1565 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1566 __asm__ ("jmp end_" #NAME "\n" \
1567 "\t" "start_" #NAME ":" \
1568 "\t" INSNS "\n" \
1569 "\t" "end_" #NAME ":"); \
1570 } while (0)
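/* For example, EMIT_ASM (my_probe, "push %rax\n\tpop %rax") assembles
   the instructions between the start_my_probe and end_my_probe labels
   inside gdbserver itself, and at run time copies those bytes into the
   jump pad via add_insns; the leading "jmp end_my_probe" ensures the
   embedded bytes are never executed by gdbserver.  ("my_probe" is just
   an illustrative name.)  */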
1571
1572 #ifdef __x86_64__
1573
1574 #define EMIT_ASM32(NAME,INSNS) \
1575 do \
1576 { \
1577 extern unsigned char start_ ## NAME, end_ ## NAME; \
1578 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1579 __asm__ (".code32\n" \
1580 "\t" "jmp end_" #NAME "\n" \
1581 "\t" "start_" #NAME ":\n" \
1582 "\t" INSNS "\n" \
1583 "\t" "end_" #NAME ":\n" \
1584 ".code64\n"); \
1585 } while (0)
1586
1587 #else
1588
1589 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1590
1591 #endif
1592
1593 #ifdef __x86_64__
1594
1595 static void
1596 amd64_emit_prologue (void)
1597 {
1598 EMIT_ASM (amd64_prologue,
1599 "pushq %rbp\n\t"
1600 "movq %rsp,%rbp\n\t"
1601 "sub $0x20,%rsp\n\t"
1602 "movq %rdi,-8(%rbp)\n\t"
1603 "movq %rsi,-16(%rbp)");
1604 }
1605
1606
1607 static void
1608 amd64_emit_epilogue (void)
1609 {
1610 EMIT_ASM (amd64_epilogue,
1611 "movq -16(%rbp),%rdi\n\t"
1612 "movq %rax,(%rdi)\n\t"
1613 "xor %rax,%rax\n\t"
1614 "leave\n\t"
1615 "ret");
1616 }
1617
1618 static void
1619 amd64_emit_add (void)
1620 {
1621 EMIT_ASM (amd64_add,
1622 "add (%rsp),%rax\n\t"
1623 "lea 0x8(%rsp),%rsp");
1624 }
1625
1626 static void
1627 amd64_emit_sub (void)
1628 {
1629 EMIT_ASM (amd64_sub,
1630 "sub %rax,(%rsp)\n\t"
1631 "pop %rax");
1632 }
1633
1634 static void
1635 amd64_emit_mul (void)
1636 {
1637 emit_error = 1;
1638 }
1639
1640 static void
1641 amd64_emit_lsh (void)
1642 {
1643 emit_error = 1;
1644 }
1645
1646 static void
1647 amd64_emit_rsh_signed (void)
1648 {
1649 emit_error = 1;
1650 }
1651
1652 static void
1653 amd64_emit_rsh_unsigned (void)
1654 {
1655 emit_error = 1;
1656 }
1657
1658 static void
1659 amd64_emit_ext (int arg)
1660 {
1661 switch (arg)
1662 {
1663 case 8:
1664 EMIT_ASM (amd64_ext_8,
1665 "cbtw\n\t"
1666 "cwtl\n\t"
1667 "cltq");
1668 break;
1669 case 16:
1670 EMIT_ASM (amd64_ext_16,
1671 "cwtl\n\t"
1672 "cltq");
1673 break;
1674 case 32:
1675 EMIT_ASM (amd64_ext_32,
1676 "cltq");
1677 break;
1678 default:
1679 emit_error = 1;
1680 }
1681 }
1682
1683 static void
1684 amd64_emit_log_not (void)
1685 {
1686 EMIT_ASM (amd64_log_not,
1687 "test %rax,%rax\n\t"
1688 "sete %cl\n\t"
1689 "movzbq %cl,%rax");
1690 }
1691
1692 static void
1693 amd64_emit_bit_and (void)
1694 {
1695 EMIT_ASM (amd64_and,
1696 "and (%rsp),%rax\n\t"
1697 "lea 0x8(%rsp),%rsp");
1698 }
1699
1700 static void
1701 amd64_emit_bit_or (void)
1702 {
1703 EMIT_ASM (amd64_or,
1704 "or (%rsp),%rax\n\t"
1705 "lea 0x8(%rsp),%rsp");
1706 }
1707
1708 static void
1709 amd64_emit_bit_xor (void)
1710 {
1711 EMIT_ASM (amd64_xor,
1712 "xor (%rsp),%rax\n\t"
1713 "lea 0x8(%rsp),%rsp");
1714 }
1715
1716 static void
1717 amd64_emit_bit_not (void)
1718 {
1719 EMIT_ASM (amd64_bit_not,
1720 "xorq $0xffffffffffffffff,%rax");
1721 }
1722
1723 static void
1724 amd64_emit_equal (void)
1725 {
1726 EMIT_ASM (amd64_equal,
1727 "cmp %rax,(%rsp)\n\t"
1728 "je .Lamd64_equal_true\n\t"
1729 "xor %rax,%rax\n\t"
1730 "jmp .Lamd64_equal_end\n\t"
1731 ".Lamd64_equal_true:\n\t"
1732 "mov $0x1,%rax\n\t"
1733 ".Lamd64_equal_end:\n\t"
1734 "lea 0x8(%rsp),%rsp");
1735 }
1736
1737 static void
1738 amd64_emit_less_signed (void)
1739 {
1740 EMIT_ASM (amd64_less_signed,
1741 "cmp %rax,(%rsp)\n\t"
1742 "jl .Lamd64_less_signed_true\n\t"
1743 "xor %rax,%rax\n\t"
1744 "jmp .Lamd64_less_signed_end\n\t"
1745 ".Lamd64_less_signed_true:\n\t"
1746 "mov $1,%rax\n\t"
1747 ".Lamd64_less_signed_end:\n\t"
1748 "lea 0x8(%rsp),%rsp");
1749 }
1750
1751 static void
1752 amd64_emit_less_unsigned (void)
1753 {
1754 EMIT_ASM (amd64_less_unsigned,
1755 "cmp %rax,(%rsp)\n\t"
1756 "jb .Lamd64_less_unsigned_true\n\t"
1757 "xor %rax,%rax\n\t"
1758 "jmp .Lamd64_less_unsigned_end\n\t"
1759 ".Lamd64_less_unsigned_true:\n\t"
1760 "mov $1,%rax\n\t"
1761 ".Lamd64_less_unsigned_end:\n\t"
1762 "lea 0x8(%rsp),%rsp");
1763 }
1764
1765 static void
1766 amd64_emit_ref (int size)
1767 {
1768 switch (size)
1769 {
1770 case 1:
1771 EMIT_ASM (amd64_ref1,
1772 "movb (%rax),%al");
1773 break;
1774 case 2:
1775 EMIT_ASM (amd64_ref2,
1776 "movw (%rax),%ax");
1777 break;
1778 case 4:
1779 EMIT_ASM (amd64_ref4,
1780 "movl (%rax),%eax");
1781 break;
1782 case 8:
1783 EMIT_ASM (amd64_ref8,
1784 "movq (%rax),%rax");
1785 break;
1786 }
1787 }
1788
1789 static void
1790 amd64_emit_if_goto (int *offset_p, int *size_p)
1791 {
1792 EMIT_ASM (amd64_if_goto,
1793 "mov %rax,%rcx\n\t"
1794 "pop %rax\n\t"
1795 "cmp $0,%rcx\n\t"
1796 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1797 if (offset_p)
1798 *offset_p = 10;
1799 if (size_p)
1800 *size_p = 4;
1801 }
1802
1803 static void
1804 amd64_emit_goto (int *offset_p, int *size_p)
1805 {
1806 EMIT_ASM (amd64_goto,
1807 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1808 if (offset_p)
1809 *offset_p = 1;
1810 if (size_p)
1811 *size_p = 4;
1812 }
1813
1814 static void
1815 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1816 {
1817 int diff = (to - (from + size));
1818 unsigned char buf[sizeof (int)];
1819
1820 if (size != 4)
1821 {
1822 emit_error = 1;
1823 return;
1824 }
1825
1826 memcpy (buf, &diff, sizeof (int));
1827 write_inferior_memory (from, buf, sizeof (int));
1828 }
1829
1830 static void
1831 amd64_emit_const (LONGEST num)
1832 {
1833 unsigned char buf[16];
1834 int i;
1835 CORE_ADDR buildaddr = current_insn_ptr;
1836
1837 i = 0;
1838 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1839 memcpy (&buf[i], &num, sizeof (num));
1840 i += 8;
1841 append_insns (&buildaddr, i, buf);
1842 current_insn_ptr = buildaddr;
1843 }
1844
1845 static void
1846 amd64_emit_call (CORE_ADDR fn)
1847 {
1848 unsigned char buf[16];
1849 int i;
1850 CORE_ADDR buildaddr;
1851 LONGEST offset64;
1852
1853 /* The destination function, living in the shared library, may be
1854 more than 31 bits away from the compiled code pad. */
1855
1856 buildaddr = current_insn_ptr;
1857
1858 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1859
1860 i = 0;
1861
1862 if (offset64 > INT_MAX || offset64 < INT_MIN)
1863 {
1864 /* Offset is too large for a direct call, so use an indirect callq
1865 through a register (we avoid this when possible). Use %r10: since
1866 it is call-clobbered, we don't have to push/pop it. */
1867 buf[i++] = 0x48; /* mov $fn,%r10 */
1868 buf[i++] = 0xba;
1869 memcpy (buf + i, &fn, 8);
1870 i += 8;
1871 buf[i++] = 0xff; /* callq *%r10 */
1872 buf[i++] = 0xd2;
1873 }
1874 else
1875 {
1876 int offset32 = offset64; /* we know we can't overflow here. */
1877
1878 buf[i++] = 0xe8; /* call <reladdr> */
1879 memcpy (buf + i, &offset32, 4);
1880 i += 4;
1881 }
1882
1883 append_insns (&buildaddr, i, buf);
1884 current_insn_ptr = buildaddr;
1885 }
1886
1887 static void
1888 amd64_emit_reg (int reg)
1889 {
1890 unsigned char buf[16];
1891 int i;
1892 CORE_ADDR buildaddr;
1893
1894 /* Assume raw_regs is still in %rdi. */
1895 buildaddr = current_insn_ptr;
1896 i = 0;
1897 buf[i++] = 0xbe; /* mov $<n>,%esi */
1898 memcpy (&buf[i], &reg, sizeof (reg));
1899 i += 4;
1900 append_insns (&buildaddr, i, buf);
1901 current_insn_ptr = buildaddr;
1902 amd64_emit_call (get_raw_reg_func_addr ());
1903 }
1904
1905 static void
1906 amd64_emit_pop (void)
1907 {
1908 EMIT_ASM (amd64_pop,
1909 "pop %rax");
1910 }
1911
1912 static void
1913 amd64_emit_stack_flush (void)
1914 {
1915 EMIT_ASM (amd64_stack_flush,
1916 "push %rax");
1917 }
1918
1919 static void
1920 amd64_emit_zero_ext (int arg)
1921 {
1922 switch (arg)
1923 {
1924 case 8:
1925 EMIT_ASM (amd64_zero_ext_8,
1926 "and $0xff,%rax");
1927 break;
1928 case 16:
1929 EMIT_ASM (amd64_zero_ext_16,
1930 "and $0xffff,%rax");
1931 break;
1932 case 32:
1933 EMIT_ASM (amd64_zero_ext_32,
1934 "mov $0xffffffff,%rcx\n\t"
1935 "and %rcx,%rax");
1936 break;
1937 default:
1938 emit_error = 1;
1939 }
1940 }
1941
1942 static void
1943 amd64_emit_swap (void)
1944 {
1945 EMIT_ASM (amd64_swap,
1946 "mov %rax,%rcx\n\t"
1947 "pop %rax\n\t"
1948 "push %rcx");
1949 }
1950
1951 static void
1952 amd64_emit_stack_adjust (int n)
1953 {
1954 unsigned char buf[16];
1955 int i;
1956 CORE_ADDR buildaddr = current_insn_ptr;
1957
1958 i = 0;
1959 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1960 buf[i++] = 0x8d;
1961 buf[i++] = 0x64;
1962 buf[i++] = 0x24;
1963 /* This only handles adjustments up to 16, but we don't expect any more. */
1964 buf[i++] = n * 8;
1965 append_insns (&buildaddr, i, buf);
1966 current_insn_ptr = buildaddr;
1967 }
1968
1969 /* FN's prototype is `LONGEST(*fn)(int)'. */
1970
1971 static void
1972 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1973 {
1974 unsigned char buf[16];
1975 int i;
1976 CORE_ADDR buildaddr;
1977
1978 buildaddr = current_insn_ptr;
1979 i = 0;
1980 buf[i++] = 0xbf; /* movl $<n>,%edi */
1981 memcpy (&buf[i], &arg1, sizeof (arg1));
1982 i += 4;
1983 append_insns (&buildaddr, i, buf);
1984 current_insn_ptr = buildaddr;
1985 amd64_emit_call (fn);
1986 }
1987
1988 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1989
1990 static void
1991 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1992 {
1993 unsigned char buf[16];
1994 int i;
1995 CORE_ADDR buildaddr;
1996
1997 buildaddr = current_insn_ptr;
1998 i = 0;
1999 buf[i++] = 0xbf; /* movl $<n>,%edi */
2000 memcpy (&buf[i], &arg1, sizeof (arg1));
2001 i += 4;
2002 append_insns (&buildaddr, i, buf);
2003 current_insn_ptr = buildaddr;
2004 EMIT_ASM (amd64_void_call_2_a,
2005 /* Save away a copy of the stack top. */
2006 "push %rax\n\t"
2007 /* Also pass top as the second argument. */
2008 "mov %rax,%rsi");
2009 amd64_emit_call (fn);
2010 EMIT_ASM (amd64_void_call_2_b,
2011 /* Restore the stack top, %rax may have been trashed. */
2012 "pop %rax");
2013 }
2014
2015 void
2016 amd64_emit_eq_goto (int *offset_p, int *size_p)
2017 {
2018 EMIT_ASM (amd64_eq,
2019 "cmp %rax,(%rsp)\n\t"
2020 "jne .Lamd64_eq_fallthru\n\t"
2021 "lea 0x8(%rsp),%rsp\n\t"
2022 "pop %rax\n\t"
2023 /* jmp, but don't trust the assembler to choose the right jump */
2024 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2025 ".Lamd64_eq_fallthru:\n\t"
2026 "lea 0x8(%rsp),%rsp\n\t"
2027 "pop %rax");
2028
2029 if (offset_p)
2030 *offset_p = 13;
2031 if (size_p)
2032 *size_p = 4;
2033 }
2034
2035 void
2036 amd64_emit_ne_goto (int *offset_p, int *size_p)
2037 {
2038 EMIT_ASM (amd64_ne,
2039 "cmp %rax,(%rsp)\n\t"
2040 "je .Lamd64_ne_fallthru\n\t"
2041 "lea 0x8(%rsp),%rsp\n\t"
2042 "pop %rax\n\t"
2043 /* jmp, but don't trust the assembler to choose the right jump */
2044 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2045 ".Lamd64_ne_fallthru:\n\t"
2046 "lea 0x8(%rsp),%rsp\n\t"
2047 "pop %rax");
2048
2049 if (offset_p)
2050 *offset_p = 13;
2051 if (size_p)
2052 *size_p = 4;
2053 }
2054
2055 void
2056 amd64_emit_lt_goto (int *offset_p, int *size_p)
2057 {
2058 EMIT_ASM (amd64_lt,
2059 "cmp %rax,(%rsp)\n\t"
2060 "jnl .Lamd64_lt_fallthru\n\t"
2061 "lea 0x8(%rsp),%rsp\n\t"
2062 "pop %rax\n\t"
2063 /* jmp, but don't trust the assembler to choose the right jump */
2064 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2065 ".Lamd64_lt_fallthru:\n\t"
2066 "lea 0x8(%rsp),%rsp\n\t"
2067 "pop %rax");
2068
2069 if (offset_p)
2070 *offset_p = 13;
2071 if (size_p)
2072 *size_p = 4;
2073 }
2074
2075 void
2076 amd64_emit_le_goto (int *offset_p, int *size_p)
2077 {
2078 EMIT_ASM (amd64_le,
2079 "cmp %rax,(%rsp)\n\t"
2080 "jnle .Lamd64_le_fallthru\n\t"
2081 "lea 0x8(%rsp),%rsp\n\t"
2082 "pop %rax\n\t"
2083 /* jmp, but don't trust the assembler to choose the right jump */
2084 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2085 ".Lamd64_le_fallthru:\n\t"
2086 "lea 0x8(%rsp),%rsp\n\t"
2087 "pop %rax");
2088
2089 if (offset_p)
2090 *offset_p = 13;
2091 if (size_p)
2092 *size_p = 4;
2093 }
2094
2095 void
2096 amd64_emit_gt_goto (int *offset_p, int *size_p)
2097 {
2098 EMIT_ASM (amd64_gt,
2099 "cmp %rax,(%rsp)\n\t"
2100 "jng .Lamd64_gt_fallthru\n\t"
2101 "lea 0x8(%rsp),%rsp\n\t"
2102 "pop %rax\n\t"
2103 /* jmp, but don't trust the assembler to choose the right jump */
2104 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2105 ".Lamd64_gt_fallthru:\n\t"
2106 "lea 0x8(%rsp),%rsp\n\t"
2107 "pop %rax");
2108
2109 if (offset_p)
2110 *offset_p = 13;
2111 if (size_p)
2112 *size_p = 4;
2113 }
2114
2115 void
2116 amd64_emit_ge_goto (int *offset_p, int *size_p)
2117 {
2118 EMIT_ASM (amd64_ge,
2119 "cmp %rax,(%rsp)\n\t"
2120 "jnge .Lamd64_ge_fallthru\n\t"
2121 ".Lamd64_ge_jump:\n\t"
2122 "lea 0x8(%rsp),%rsp\n\t"
2123 "pop %rax\n\t"
2124 /* jmp, but don't trust the assembler to choose the right jump */
2125 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2126 ".Lamd64_ge_fallthru:\n\t"
2127 "lea 0x8(%rsp),%rsp\n\t"
2128 "pop %rax");
2129
2130 if (offset_p)
2131 *offset_p = 13;
2132 if (size_p)
2133 *size_p = 4;
2134 }
2135
2136 struct emit_ops amd64_emit_ops =
2137 {
2138 amd64_emit_prologue,
2139 amd64_emit_epilogue,
2140 amd64_emit_add,
2141 amd64_emit_sub,
2142 amd64_emit_mul,
2143 amd64_emit_lsh,
2144 amd64_emit_rsh_signed,
2145 amd64_emit_rsh_unsigned,
2146 amd64_emit_ext,
2147 amd64_emit_log_not,
2148 amd64_emit_bit_and,
2149 amd64_emit_bit_or,
2150 amd64_emit_bit_xor,
2151 amd64_emit_bit_not,
2152 amd64_emit_equal,
2153 amd64_emit_less_signed,
2154 amd64_emit_less_unsigned,
2155 amd64_emit_ref,
2156 amd64_emit_if_goto,
2157 amd64_emit_goto,
2158 amd64_write_goto_address,
2159 amd64_emit_const,
2160 amd64_emit_call,
2161 amd64_emit_reg,
2162 amd64_emit_pop,
2163 amd64_emit_stack_flush,
2164 amd64_emit_zero_ext,
2165 amd64_emit_swap,
2166 amd64_emit_stack_adjust,
2167 amd64_emit_int_call_1,
2168 amd64_emit_void_call_2,
2169 amd64_emit_eq_goto,
2170 amd64_emit_ne_goto,
2171 amd64_emit_lt_goto,
2172 amd64_emit_le_goto,
2173 amd64_emit_gt_goto,
2174 amd64_emit_ge_goto
2175 };
2176
2177 #endif /* __x86_64__ */
2178
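/* The 32-bit emitters below keep the 64-bit value on top of the
   expression stack split across two registers: %eax holds the low 32
   bits and %ebx the high 32 bits.  Deeper entries live on the real
   stack as two 32-bit words, low word at the lower address, which is
   why 64-bit operations are emitted as pairs such as add/adc and
   sub/sbb.  */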
2179 static void
2180 i386_emit_prologue (void)
2181 {
2182 EMIT_ASM32 (i386_prologue,
2183 "push %ebp\n\t"
2184 "mov %esp,%ebp\n\t"
2185 "push %ebx");
2186 /* At this point, the raw regs base address is at 8(%ebp), and the
2187 value pointer is at 12(%ebp). */
2188 }
2189
2190 static void
2191 i386_emit_epilogue (void)
2192 {
2193 EMIT_ASM32 (i386_epilogue,
2194 "mov 12(%ebp),%ecx\n\t"
2195 "mov %eax,(%ecx)\n\t"
2196 "mov %ebx,0x4(%ecx)\n\t"
2197 "xor %eax,%eax\n\t"
2198 "pop %ebx\n\t"
2199 "pop %ebp\n\t"
2200 "ret");
2201 }
2202
2203 static void
2204 i386_emit_add (void)
2205 {
2206 EMIT_ASM32 (i386_add,
2207 "add (%esp),%eax\n\t"
2208 "adc 0x4(%esp),%ebx\n\t"
2209 "lea 0x8(%esp),%esp");
2210 }
2211
2212 static void
2213 i386_emit_sub (void)
2214 {
2215 EMIT_ASM32 (i386_sub,
2216 "subl %eax,(%esp)\n\t"
2217 "sbbl %ebx,4(%esp)\n\t"
2218 "pop %eax\n\t"
2219 "pop %ebx\n\t");
2220 }
2221
2222 static void
2223 i386_emit_mul (void)
2224 {
2225 emit_error = 1;
2226 }
2227
2228 static void
2229 i386_emit_lsh (void)
2230 {
2231 emit_error = 1;
2232 }
2233
2234 static void
2235 i386_emit_rsh_signed (void)
2236 {
2237 emit_error = 1;
2238 }
2239
2240 static void
2241 i386_emit_rsh_unsigned (void)
2242 {
2243 emit_error = 1;
2244 }
2245
2246 static void
2247 i386_emit_ext (int arg)
2248 {
2249 switch (arg)
2250 {
2251 case 8:
2252 EMIT_ASM32 (i386_ext_8,
2253 "cbtw\n\t"
2254 "cwtl\n\t"
2255 "movl %eax,%ebx\n\t"
2256 "sarl $31,%ebx");
2257 break;
2258 case 16:
2259 EMIT_ASM32 (i386_ext_16,
2260 "cwtl\n\t"
2261 "movl %eax,%ebx\n\t"
2262 "sarl $31,%ebx");
2263 break;
2264 case 32:
2265 EMIT_ASM32 (i386_ext_32,
2266 "movl %eax,%ebx\n\t"
2267 "sarl $31,%ebx");
2268 break;
2269 default:
2270 emit_error = 1;
2271 }
2272 }
2273
2274 static void
2275 i386_emit_log_not (void)
2276 {
2277 EMIT_ASM32 (i386_log_not,
2278 "or %ebx,%eax\n\t"
2279 "test %eax,%eax\n\t"
2280 "sete %cl\n\t"
2281 "xor %ebx,%ebx\n\t"
2282 "movzbl %cl,%eax");
2283 }
2284
2285 static void
2286 i386_emit_bit_and (void)
2287 {
2288 EMIT_ASM32 (i386_and,
2289 "and (%esp),%eax\n\t"
2290 "and 0x4(%esp),%ebx\n\t"
2291 "lea 0x8(%esp),%esp");
2292 }
2293
2294 static void
2295 i386_emit_bit_or (void)
2296 {
2297 EMIT_ASM32 (i386_or,
2298 "or (%esp),%eax\n\t"
2299 "or 0x4(%esp),%ebx\n\t"
2300 "lea 0x8(%esp),%esp");
2301 }
2302
2303 static void
2304 i386_emit_bit_xor (void)
2305 {
2306 EMIT_ASM32 (i386_xor,
2307 "xor (%esp),%eax\n\t"
2308 "xor 0x4(%esp),%ebx\n\t"
2309 "lea 0x8(%esp),%esp");
2310 }
2311
2312 static void
2313 i386_emit_bit_not (void)
2314 {
2315 EMIT_ASM32 (i386_bit_not,
2316 "xor $0xffffffff,%eax\n\t"
2317 "xor $0xffffffff,%ebx\n\t");
2318 }
2319
2320 static void
2321 i386_emit_equal (void)
2322 {
2323 EMIT_ASM32 (i386_equal,
2324 "cmpl %ebx,4(%esp)\n\t"
2325 "jne .Li386_equal_false\n\t"
2326 "cmpl %eax,(%esp)\n\t"
2327 "je .Li386_equal_true\n\t"
2328 ".Li386_equal_false:\n\t"
2329 "xor %eax,%eax\n\t"
2330 "jmp .Li386_equal_end\n\t"
2331 ".Li386_equal_true:\n\t"
2332 "mov $1,%eax\n\t"
2333 ".Li386_equal_end:\n\t"
2334 "xor %ebx,%ebx\n\t"
2335 "lea 0x8(%esp),%esp");
2336 }
2337
2338 static void
2339 i386_emit_less_signed (void)
2340 {
2341 EMIT_ASM32 (i386_less_signed,
2342 "cmpl %ebx,4(%esp)\n\t"
2343 "jl .Li386_less_signed_true\n\t"
2344 "jne .Li386_less_signed_false\n\t"
2345 "cmpl %eax,(%esp)\n\t"
2346 "jl .Li386_less_signed_true\n\t"
2347 ".Li386_less_signed_false:\n\t"
2348 "xor %eax,%eax\n\t"
2349 "jmp .Li386_less_signed_end\n\t"
2350 ".Li386_less_signed_true:\n\t"
2351 "mov $1,%eax\n\t"
2352 ".Li386_less_signed_end:\n\t"
2353 "xor %ebx,%ebx\n\t"
2354 "lea 0x8(%esp),%esp");
2355 }
2356
2357 static void
2358 i386_emit_less_unsigned (void)
2359 {
2360 EMIT_ASM32 (i386_less_unsigned,
2361 "cmpl %ebx,4(%esp)\n\t"
2362 "jb .Li386_less_unsigned_true\n\t"
2363 "jne .Li386_less_unsigned_false\n\t"
2364 "cmpl %eax,(%esp)\n\t"
2365 "jb .Li386_less_unsigned_true\n\t"
2366 ".Li386_less_unsigned_false:\n\t"
2367 "xor %eax,%eax\n\t"
2368 "jmp .Li386_less_unsigned_end\n\t"
2369 ".Li386_less_unsigned_true:\n\t"
2370 "mov $1,%eax\n\t"
2371 ".Li386_less_unsigned_end:\n\t"
2372 "xor %ebx,%ebx\n\t"
2373 "lea 0x8(%esp),%esp");
2374 }
2375
2376 static void
2377 i386_emit_ref (int size)
2378 {
2379 switch (size)
2380 {
2381 case 1:
2382 EMIT_ASM32 (i386_ref1,
2383 "movb (%eax),%al");
2384 break;
2385 case 2:
2386 EMIT_ASM32 (i386_ref2,
2387 "movw (%eax),%ax");
2388 break;
2389 case 4:
2390 EMIT_ASM32 (i386_ref4,
2391 "movl (%eax),%eax");
2392 break;
2393 case 8:
2394 EMIT_ASM32 (i386_ref8,
2395 "movl 4(%eax),%ebx\n\t"
2396 "movl (%eax),%eax");
2397 break;
2398 }
2399 }
2400
2401 static void
2402 i386_emit_if_goto (int *offset_p, int *size_p)
2403 {
2404 EMIT_ASM32 (i386_if_goto,
2405 "mov %eax,%ecx\n\t"
2406 "or %ebx,%ecx\n\t"
2407 "pop %eax\n\t"
2408 "pop %ebx\n\t"
2409 "cmpl $0,%ecx\n\t"
2410 /* Don't trust the assembler to choose the right jump */
2411 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2412
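  /* 11 is the offset of the jne's 4-byte displacement in the sequence
     above: 2-byte mov + 2-byte or + two 1-byte pops + 3-byte cmpl
     (imm8 form) + the 2-byte 0x0f 0x85 opcode.  */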
2413 if (offset_p)
2414 *offset_p = 11; /* be sure that this matches the sequence above */
2415 if (size_p)
2416 *size_p = 4;
2417 }
2418
2419 static void
2420 i386_emit_goto (int *offset_p, int *size_p)
2421 {
2422 EMIT_ASM32 (i386_goto,
2423 /* Don't trust the assembler to choose the right jump */
2424 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2425 if (offset_p)
2426 *offset_p = 1;
2427 if (size_p)
2428 *size_p = 4;
2429 }
2430
2431 static void
2432 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2433 {
2434 int diff = (to - (from + size));
2435 unsigned char buf[sizeof (int)];
2436
2437 /* We're only doing 4-byte sizes at the moment. */
2438 if (size != 4)
2439 {
2440 emit_error = 1;
2441 return;
2442 }
2443
2444 memcpy (buf, &diff, sizeof (int));
2445 write_inferior_memory (from, buf, sizeof (int));
2446 }
2447
2448 static void
2449 i386_emit_const (LONGEST num)
2450 {
2451 unsigned char buf[16];
2452 int i, hi, lo;
2453 CORE_ADDR buildaddr = current_insn_ptr;
2454
2455 i = 0;
2456 buf[i++] = 0xb8; /* mov $<n>,%eax */
2457 lo = num & 0xffffffff;
2458 memcpy (&buf[i], &lo, sizeof (lo));
2459 i += 4;
2460 hi = ((num >> 32) & 0xffffffff);
2461 if (hi)
2462 {
2463 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2464 memcpy (&buf[i], &hi, sizeof (hi));
2465 i += 4;
2466 }
2467 else
2468 {
2469 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2470 }
2471 append_insns (&buildaddr, i, buf);
2472 current_insn_ptr = buildaddr;
2473 }
2474
2475 static void
2476 i386_emit_call (CORE_ADDR fn)
2477 {
2478 unsigned char buf[16];
2479 int i, offset;
2480 CORE_ADDR buildaddr;
2481
2482 buildaddr = current_insn_ptr;
2483 i = 0;
2484 buf[i++] = 0xe8; /* call <reladdr> */
2485 offset = ((int) fn) - (buildaddr + 5);
2486 memcpy (buf + 1, &offset, 4);
2487 append_insns (&buildaddr, 5, buf);
2488 current_insn_ptr = buildaddr;
2489 }
2490
2491 static void
2492 i386_emit_reg (int reg)
2493 {
2494 unsigned char buf[16];
2495 int i;
2496 CORE_ADDR buildaddr;
2497
2498 EMIT_ASM32 (i386_reg_a,
2499 "sub $0x8,%esp");
2500 buildaddr = current_insn_ptr;
2501 i = 0;
2502 buf[i++] = 0xb8; /* mov $<n>,%eax */
2503 memcpy (&buf[i], &reg, sizeof (reg));
2504 i += 4;
2505 append_insns (&buildaddr, i, buf);
2506 current_insn_ptr = buildaddr;
2507 EMIT_ASM32 (i386_reg_b,
2508 "mov %eax,4(%esp)\n\t"
2509 "mov 8(%ebp),%eax\n\t"
2510 "mov %eax,(%esp)");
2511 i386_emit_call (get_raw_reg_func_addr ());
2512 EMIT_ASM32 (i386_reg_c,
2513 "xor %ebx,%ebx\n\t"
2514 "lea 0x8(%esp),%esp");
2515 }
2516
2517 static void
2518 i386_emit_pop (void)
2519 {
2520 EMIT_ASM32 (i386_pop,
2521 "pop %eax\n\t"
2522 "pop %ebx");
2523 }
2524
2525 static void
2526 i386_emit_stack_flush (void)
2527 {
2528 EMIT_ASM32 (i386_stack_flush,
2529 "push %ebx\n\t"
2530 "push %eax");
2531 }
2532
2533 static void
2534 i386_emit_zero_ext (int arg)
2535 {
2536 switch (arg)
2537 {
2538 case 8:
2539 EMIT_ASM32 (i386_zero_ext_8,
2540 "and $0xff,%eax\n\t"
2541 "xor %ebx,%ebx");
2542 break;
2543 case 16:
2544 EMIT_ASM32 (i386_zero_ext_16,
2545 "and $0xffff,%eax\n\t"
2546 "xor %ebx,%ebx");
2547 break;
2548 case 32:
2549 EMIT_ASM32 (i386_zero_ext_32,
2550 "xor %ebx,%ebx");
2551 break;
2552 default:
2553 emit_error = 1;
2554 }
2555 }
2556
2557 static void
2558 i386_emit_swap (void)
2559 {
2560 EMIT_ASM32 (i386_swap,
2561 "mov %eax,%ecx\n\t"
2562 "mov %ebx,%edx\n\t"
2563 "pop %eax\n\t"
2564 "pop %ebx\n\t"
2565 "push %edx\n\t"
2566 "push %ecx");
2567 }
2568
2569 static void
2570 i386_emit_stack_adjust (int n)
2571 {
2572 unsigned char buf[16];
2573 int i;
2574 CORE_ADDR buildaddr = current_insn_ptr;
2575
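  /* Emit `lea N*8(%esp),%esp'.  Each entry of the expression stack
     occupies 8 bytes (two 32-bit words), and the displacement below
     is encoded as a single signed byte, which assumes the adjustment
     stays small.  */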
2576 i = 0;
2577 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2578 buf[i++] = 0x64;
2579 buf[i++] = 0x24;
2580 buf[i++] = n * 8;
2581 append_insns (&buildaddr, i, buf);
2582 current_insn_ptr = buildaddr;
2583 }
2584
2585 /* FN's prototype is `LONGEST(*fn)(int)'. */
2586
2587 static void
2588 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2589 {
2590 unsigned char buf[16];
2591 int i;
2592 CORE_ADDR buildaddr;
2593
2594 EMIT_ASM32 (i386_int_call_1_a,
2595 /* Reserve a bit of stack space. */
2596 "sub $0x8,%esp");
2597 /* Put the one argument on the stack. */
2598 buildaddr = current_insn_ptr;
2599 i = 0;
2600 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2601 buf[i++] = 0x04;
2602 buf[i++] = 0x24;
2603 memcpy (&buf[i], &arg1, sizeof (arg1));
2604 i += 4;
2605 append_insns (&buildaddr, i, buf);
2606 current_insn_ptr = buildaddr;
2607 i386_emit_call (fn);
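  /* Per the i386 ABI the LONGEST result comes back in %edx:%eax;
     copy the high half into %ebx, where the compiled code keeps it.  */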
2608 EMIT_ASM32 (i386_int_call_1_c,
2609 "mov %edx,%ebx\n\t"
2610 "lea 0x8(%esp),%esp");
2611 }
2612
2613 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2614
2615 static void
2616 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2617 {
2618 unsigned char buf[16];
2619 int i;
2620 CORE_ADDR buildaddr;
2621
2622 EMIT_ASM32 (i386_void_call_2_a,
2623 /* Preserve %eax only; we don't have to worry about %ebx. */
2624 "push %eax\n\t"
2625 /* Reserve a bit of stack space for arguments. */
2626 "sub $0x10,%esp\n\t"
2627 /* Copy "top" to the second argument position. (Note that
2628                    we can't assume the function won't scribble on its
2629 arguments, so don't try to restore from this.) */
2630 "mov %eax,4(%esp)\n\t"
2631 "mov %ebx,8(%esp)");
2632 /* Put the first argument on the stack. */
2633 buildaddr = current_insn_ptr;
2634 i = 0;
2635 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2636 buf[i++] = 0x04;
2637 buf[i++] = 0x24;
2638 memcpy (&buf[i], &arg1, sizeof (arg1));
2639 i += 4;
2640 append_insns (&buildaddr, i, buf);
2641 current_insn_ptr = buildaddr;
2642 i386_emit_call (fn);
2643 EMIT_ASM32 (i386_void_call_2_b,
2644 "lea 0x10(%esp),%esp\n\t"
2645 /* Restore original stack top. */
2646 "pop %eax");
2647 }
2648
2649
2650 void
2651 i386_emit_eq_goto (int *offset_p, int *size_p)
2652 {
2653 EMIT_ASM32 (eq,
2654               /* Check low half first, more likely to be the decider */
2655 "cmpl %eax,(%esp)\n\t"
2656 "jne .Leq_fallthru\n\t"
2657 "cmpl %ebx,4(%esp)\n\t"
2658 "jne .Leq_fallthru\n\t"
2659 "lea 0x8(%esp),%esp\n\t"
2660 "pop %eax\n\t"
2661 "pop %ebx\n\t"
2662 /* jmp, but don't trust the assembler to choose the right jump */
2663 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2664 ".Leq_fallthru:\n\t"
2665 "lea 0x8(%esp),%esp\n\t"
2666 "pop %eax\n\t"
2667 "pop %ebx");
2668
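  /* 18 is the offset of the hand-coded jump's 4-byte displacement in
     the sequence above: 3-byte cmpl + 2-byte jne + 4-byte cmpl +
     2-byte jne + 4-byte lea + two 1-byte pops + the 1-byte 0xe9
     opcode, assuming the assembler emits the short conditional
     jumps.  */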
2669 if (offset_p)
2670 *offset_p = 18;
2671 if (size_p)
2672 *size_p = 4;
2673 }
2674
2675 void
2676 i386_emit_ne_goto (int *offset_p, int *size_p)
2677 {
2678 EMIT_ASM32 (ne,
2679               /* Check low half first, more likely to be the decider */
2680 "cmpl %eax,(%esp)\n\t"
2681 "jne .Lne_jump\n\t"
2682 "cmpl %ebx,4(%esp)\n\t"
2683 "je .Lne_fallthru\n\t"
2684 ".Lne_jump:\n\t"
2685 "lea 0x8(%esp),%esp\n\t"
2686 "pop %eax\n\t"
2687 "pop %ebx\n\t"
2688 /* jmp, but don't trust the assembler to choose the right jump */
2689 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2690 ".Lne_fallthru:\n\t"
2691 "lea 0x8(%esp),%esp\n\t"
2692 "pop %eax\n\t"
2693 "pop %ebx");
2694
2695 if (offset_p)
2696 *offset_p = 18;
2697 if (size_p)
2698 *size_p = 4;
2699 }
2700
2701 void
2702 i386_emit_lt_goto (int *offset_p, int *size_p)
2703 {
2704 EMIT_ASM32 (lt,
2705 "cmpl %ebx,4(%esp)\n\t"
2706 "jl .Llt_jump\n\t"
2707 "jne .Llt_fallthru\n\t"
2708 "cmpl %eax,(%esp)\n\t"
2709 "jnl .Llt_fallthru\n\t"
2710 ".Llt_jump:\n\t"
2711 "lea 0x8(%esp),%esp\n\t"
2712 "pop %eax\n\t"
2713 "pop %ebx\n\t"
2714 /* jmp, but don't trust the assembler to choose the right jump */
2715 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2716 ".Llt_fallthru:\n\t"
2717 "lea 0x8(%esp),%esp\n\t"
2718 "pop %eax\n\t"
2719 "pop %ebx");
2720
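  /* The offset grows to 20 here and in the le/gt/ge emitters below,
     because the three-way comparison needs one extra 2-byte short
     conditional jump compared with the eq/ne case.  */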
2721 if (offset_p)
2722 *offset_p = 20;
2723 if (size_p)
2724 *size_p = 4;
2725 }
2726
2727 void
2728 i386_emit_le_goto (int *offset_p, int *size_p)
2729 {
2730 EMIT_ASM32 (le,
2731 "cmpl %ebx,4(%esp)\n\t"
2732 "jle .Lle_jump\n\t"
2733 "jne .Lle_fallthru\n\t"
2734 "cmpl %eax,(%esp)\n\t"
2735 "jnle .Lle_fallthru\n\t"
2736 ".Lle_jump:\n\t"
2737 "lea 0x8(%esp),%esp\n\t"
2738 "pop %eax\n\t"
2739 "pop %ebx\n\t"
2740 /* jmp, but don't trust the assembler to choose the right jump */
2741 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2742 ".Lle_fallthru:\n\t"
2743 "lea 0x8(%esp),%esp\n\t"
2744 "pop %eax\n\t"
2745 "pop %ebx");
2746
2747 if (offset_p)
2748 *offset_p = 20;
2749 if (size_p)
2750 *size_p = 4;
2751 }
2752
2753 void
2754 i386_emit_gt_goto (int *offset_p, int *size_p)
2755 {
2756 EMIT_ASM32 (gt,
2757 "cmpl %ebx,4(%esp)\n\t"
2758 "jg .Lgt_jump\n\t"
2759 "jne .Lgt_fallthru\n\t"
2760 "cmpl %eax,(%esp)\n\t"
2761 "jng .Lgt_fallthru\n\t"
2762 ".Lgt_jump:\n\t"
2763 "lea 0x8(%esp),%esp\n\t"
2764 "pop %eax\n\t"
2765 "pop %ebx\n\t"
2766 /* jmp, but don't trust the assembler to choose the right jump */
2767 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2768 ".Lgt_fallthru:\n\t"
2769 "lea 0x8(%esp),%esp\n\t"
2770 "pop %eax\n\t"
2771 "pop %ebx");
2772
2773 if (offset_p)
2774 *offset_p = 20;
2775 if (size_p)
2776 *size_p = 4;
2777 }
2778
2779 void
2780 i386_emit_ge_goto (int *offset_p, int *size_p)
2781 {
2782 EMIT_ASM32 (ge,
2783 "cmpl %ebx,4(%esp)\n\t"
2784 "jge .Lge_jump\n\t"
2785 "jne .Lge_fallthru\n\t"
2786 "cmpl %eax,(%esp)\n\t"
2787 "jnge .Lge_fallthru\n\t"
2788 ".Lge_jump:\n\t"
2789 "lea 0x8(%esp),%esp\n\t"
2790 "pop %eax\n\t"
2791 "pop %ebx\n\t"
2792 /* jmp, but don't trust the assembler to choose the right jump */
2793 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2794 ".Lge_fallthru:\n\t"
2795 "lea 0x8(%esp),%esp\n\t"
2796 "pop %eax\n\t"
2797 "pop %ebx");
2798
2799 if (offset_p)
2800 *offset_p = 20;
2801 if (size_p)
2802 *size_p = 4;
2803 }
2804
2805 struct emit_ops i386_emit_ops =
2806 {
2807 i386_emit_prologue,
2808 i386_emit_epilogue,
2809 i386_emit_add,
2810 i386_emit_sub,
2811 i386_emit_mul,
2812 i386_emit_lsh,
2813 i386_emit_rsh_signed,
2814 i386_emit_rsh_unsigned,
2815 i386_emit_ext,
2816 i386_emit_log_not,
2817 i386_emit_bit_and,
2818 i386_emit_bit_or,
2819 i386_emit_bit_xor,
2820 i386_emit_bit_not,
2821 i386_emit_equal,
2822 i386_emit_less_signed,
2823 i386_emit_less_unsigned,
2824 i386_emit_ref,
2825 i386_emit_if_goto,
2826 i386_emit_goto,
2827 i386_write_goto_address,
2828 i386_emit_const,
2829 i386_emit_call,
2830 i386_emit_reg,
2831 i386_emit_pop,
2832 i386_emit_stack_flush,
2833 i386_emit_zero_ext,
2834 i386_emit_swap,
2835 i386_emit_stack_adjust,
2836 i386_emit_int_call_1,
2837 i386_emit_void_call_2,
2838 i386_emit_eq_goto,
2839 i386_emit_ne_goto,
2840 i386_emit_lt_goto,
2841 i386_emit_le_goto,
2842 i386_emit_gt_goto,
2843 i386_emit_ge_goto
2844 };
2845
2846
2847 static struct emit_ops *
2848 x86_emit_ops (void)
2849 {
2850 #ifdef __x86_64__
2851 if (is_64bit_tdesc ())
2852 return &amd64_emit_ops;
2853 else
2854 #endif
2855 return &i386_emit_ops;
2856 }
2857
2858 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2859
2860 static const gdb_byte *
2861 x86_sw_breakpoint_from_kind (int kind, int *size)
2862 {
2863 *size = x86_breakpoint_len;
2864 return x86_breakpoint;
2865 }
2866
2867 static int
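/* Implementation of linux_target_ops method "supports_range_stepping".  */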
2868 x86_supports_range_stepping (void)
2869 {
2870 return 1;
2871 }
2872
2873 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2874 */
2875
2876 static int
2877 x86_supports_hardware_single_step (void)
2878 {
2879 return 1;
2880 }
2881
2882 static int
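/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".  */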
2883 x86_get_ipa_tdesc_idx (void)
2884 {
2885 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2886 const struct target_desc *tdesc = regcache->tdesc;
2887
2888 #ifdef __x86_64__
2889 if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
2890 || tdesc == tdesc_x32_linux)
2891 return X86_TDESC_SSE;
2892 if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
2893 return X86_TDESC_AVX;
2894 if (tdesc == tdesc_amd64_mpx_linux)
2895 return X86_TDESC_MPX;
2896 if (tdesc == tdesc_amd64_avx_mpx_linux)
2897 return X86_TDESC_AVX_MPX;
2898 if (tdesc == tdesc_amd64_avx512_linux || tdesc == tdesc_x32_avx512_linux)
2899 return X86_TDESC_AVX512;
2900 #endif
2901
2902 if (tdesc == tdesc_i386_mmx_linux)
2903 return X86_TDESC_MMX;
2904 if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
2905 return X86_TDESC_SSE;
2906 if (tdesc == tdesc_i386_avx_linux)
2907 return X86_TDESC_AVX;
2908 if (tdesc == tdesc_i386_mpx_linux)
2909 return X86_TDESC_MPX;
2910 if (tdesc == tdesc_i386_avx_mpx_linux)
2911 return X86_TDESC_AVX_MPX;
2912 if (tdesc == tdesc_i386_avx512_linux)
2913 return X86_TDESC_AVX512;
2914
2915 return 0;
2916 }
2917
2918 /* This is initialized assuming an amd64 target.
2919 x86_arch_setup will correct it for i386 or amd64 targets. */
2920
2921 struct linux_target_ops the_low_target =
2922 {
2923 x86_arch_setup,
2924 x86_linux_regs_info,
2925 x86_cannot_fetch_register,
2926 x86_cannot_store_register,
2927 NULL, /* fetch_register */
2928 x86_get_pc,
2929 x86_set_pc,
2930 NULL, /* breakpoint_kind_from_pc */
2931 x86_sw_breakpoint_from_kind,
2932 NULL,
2933 1,
2934 x86_breakpoint_at,
2935 x86_supports_z_point_type,
2936 x86_insert_point,
2937 x86_remove_point,
2938 x86_stopped_by_watchpoint,
2939 x86_stopped_data_address,
2940 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2941 native i386 case (no registers smaller than an xfer unit), and are not
2942 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2943 NULL,
2944 NULL,
2945 /* need to fix up i386 siginfo if host is amd64 */
2946 x86_siginfo_fixup,
2947 x86_linux_new_process,
2948 x86_linux_new_thread,
2949 x86_linux_new_fork,
2950 x86_linux_prepare_to_resume,
2951 x86_linux_process_qsupported,
2952 x86_supports_tracepoints,
2953 x86_get_thread_area,
2954 x86_install_fast_tracepoint_jump_pad,
2955 x86_emit_ops,
2956 x86_get_min_fast_tracepoint_insn_len,
2957 x86_supports_range_stepping,
2958 NULL, /* breakpoint_kind_from_current_state */
2959 x86_supports_hardware_single_step,
2960 x86_get_syscall_trapinfo,
2961 x86_get_ipa_tdesc_idx,
2962 };
2963
2964 void
2965 initialize_low_arch (void)
2966 {
2967 /* Initialize the Linux target descriptions. */
2968 #ifdef __x86_64__
2969 init_registers_amd64_linux ();
2970 init_registers_amd64_avx_linux ();
2971 init_registers_amd64_avx512_linux ();
2972 init_registers_amd64_mpx_linux ();
2973 init_registers_amd64_avx_mpx_linux ();
2974
2975 init_registers_x32_linux ();
2976 init_registers_x32_avx_linux ();
2977 init_registers_x32_avx512_linux ();
2978
2979 tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
2980 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
2981 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2982 #endif
2983 init_registers_i386_linux ();
2984 init_registers_i386_mmx_linux ();
2985 init_registers_i386_avx_linux ();
2986 init_registers_i386_avx512_linux ();
2987 init_registers_i386_mpx_linux ();
2988 init_registers_i386_avx_mpx_linux ();
2989
2990 tdesc_i386_linux_no_xml = XNEW (struct target_desc);
2991 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
2992 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2993
2994 initialize_regsets_info (&x86_regsets_info);
2995 }