1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
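/* Templates for the jump instructions written into the inferior: a
   5-byte "jmp rel32" (opcode 0xe9) and a 4-byte "jmp rel16" with an
   operand-size prefix (0x66 0xe9).  The zeroed displacement bytes are
   patched in when the jump pads are installed below.  */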
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include "nat/gdb_ptrace.h"
76 #include <sys/uio.h>
77
78 #ifndef PTRACE_GET_THREAD_AREA
79 #define PTRACE_GET_THREAD_AREA 25
80 #endif
81
82 /* This definition comes from prctl.h, but some kernels may not have it. */
83 #ifndef PTRACE_ARCH_PRCTL
84 #define PTRACE_ARCH_PRCTL 30
85 #endif
86
87 /* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89 #ifndef ARCH_GET_FS
90 #define ARCH_SET_GS 0x1001
91 #define ARCH_SET_FS 0x1002
92 #define ARCH_GET_FS 0x1003
93 #define ARCH_GET_GS 0x1004
94 #endif
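/* These codes select the operation of a PTRACE_ARCH_PRCTL request
   (they are passed as the ptrace "data" argument).  For example, as
   done further below,
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS)
   stores the FS segment base of thread LWPID into BASE.  */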
95
96 /* Per-process arch-specific data we want to keep. */
97
98 struct arch_process_info
99 {
100 struct x86_debug_reg_state debug_reg_state;
101 };
102
103 #ifdef __x86_64__
104
105 /* Mapping between the general-purpose registers in `struct user'
106 format and GDB's register array layout.
107 Note that the transfer layout uses 64-bit regs. */
108 static /*const*/ int i386_regmap[] =
109 {
110 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
111 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
112 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
113 DS * 8, ES * 8, FS * 8, GS * 8
114 };
115
116 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
117
118 /* So code below doesn't have to care, i386 or amd64. */
119 #define ORIG_EAX ORIG_RAX
120 #define REGSIZE 8
121
122 static const int x86_64_regmap[] =
123 {
124 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
125 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
126 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
127 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
128 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
129 DS * 8, ES * 8, FS * 8, GS * 8,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1, -1, -1, -1, -1, -1, -1, -1,
133 -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 ORIG_RAX * 8,
136 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
137 21 * 8, 22 * 8,
138 #else
139 -1, -1,
140 #endif
141 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
142 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
143 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
146 -1, -1, -1, -1, -1, -1, -1, -1,
147 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
148 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
149 -1, -1, -1, -1, -1, -1, -1, -1,
150 -1, -1, -1, -1, -1, -1, -1, -1,
151 -1, -1, -1, -1, -1, -1, -1, -1
152 };
153
154 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
155 #define X86_64_USER_REGS (GS + 1)
156
157 #else /* ! __x86_64__ */
158
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout. */
161 static /*const*/ int i386_regmap[] =
162 {
163 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
164 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
165 EIP * 4, EFL * 4, CS * 4, SS * 4,
166 DS * 4, ES * 4, FS * 4, GS * 4
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171 #define REGSIZE 4
172
173 #endif
174
175 #ifdef __x86_64__
176
177 /* Returns true if the current inferior belongs to an x86-64 process,
178 per the tdesc. */
179
180 static int
181 is_64bit_tdesc (void)
182 {
183 struct regcache *regcache = get_thread_regcache (current_thread, 0);
184
185 return register_size (regcache->tdesc, 0) == 8;
186 }
187
188 #endif
189
190 \f
191 /* Called by libthread_db. */
192
193 ps_err_e
194 ps_get_thread_area (struct ps_prochandle *ph,
195 lwpid_t lwpid, int idx, void **base)
196 {
197 #ifdef __x86_64__
198 int use_64bit = is_64bit_tdesc ();
199
200 if (use_64bit)
201 {
202 switch (idx)
203 {
204 case FS:
205 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
206 return PS_OK;
207 break;
208 case GS:
209 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
210 return PS_OK;
211 break;
212 default:
213 return PS_BADADDR;
214 }
215 return PS_ERR;
216 }
217 #endif
218
219 {
220 unsigned int desc[4];
221
222 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
223 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
224 return PS_ERR;
225
226 /* Ensure we properly extend the value to 64-bits for x86_64. */
227 *base = (void *) (uintptr_t) desc[1];
228 return PS_OK;
229 }
230 }
231
232 /* Get the thread area address. This is used to recognize which
233 thread is which when tracing with the in-process agent library. We
234 don't read anything from the address, and treat it as opaque; it's
235 the address itself that we assume is unique per-thread. */
236
237 static int
238 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
239 {
240 #ifdef __x86_64__
241 int use_64bit = is_64bit_tdesc ();
242
243 if (use_64bit)
244 {
245 void *base;
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
247 {
248 *addr = (CORE_ADDR) (uintptr_t) base;
249 return 0;
250 }
251
252 return -1;
253 }
254 #endif
255
256 {
257 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
258 struct thread_info *thr = get_lwp_thread (lwp);
259 struct regcache *regcache = get_thread_regcache (thr, 1);
260 unsigned int desc[4];
261 ULONGEST gs = 0;
262 const int reg_thread_area = 3; /* %gs holds a selector: (GDT index << 3) | TI | RPL.  Shift right 3 bits to recover the index. */
263 int idx;
264
265 collect_register_by_name (regcache, "gs", &gs);
266
267 idx = gs >> reg_thread_area;
268
269 if (ptrace (PTRACE_GET_THREAD_AREA,
270 lwpid_of (thr),
271 (void *) (long) idx, (unsigned long) &desc) < 0)
272 return -1;
273
274 *addr = desc[1];
275 return 0;
276 }
277 }
278
279
280 \f
281 static int
282 x86_cannot_store_register (int regno)
283 {
284 #ifdef __x86_64__
285 if (is_64bit_tdesc ())
286 return 0;
287 #endif
288
289 return regno >= I386_NUM_REGS;
290 }
291
292 static int
293 x86_cannot_fetch_register (int regno)
294 {
295 #ifdef __x86_64__
296 if (is_64bit_tdesc ())
297 return 0;
298 #endif
299
300 return regno >= I386_NUM_REGS;
301 }
302
303 static void
304 x86_fill_gregset (struct regcache *regcache, void *buf)
305 {
306 int i;
307
308 #ifdef __x86_64__
309 if (register_size (regcache->tdesc, 0) == 8)
310 {
311 for (i = 0; i < X86_64_NUM_REGS; i++)
312 if (x86_64_regmap[i] != -1)
313 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
314
315 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
316 {
317 unsigned long base;
318 int lwpid = lwpid_of (current_thread);
319
320 collect_register_by_name (regcache, "fs_base", &base);
321 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
322
323 collect_register_by_name (regcache, "gs_base", &base);
324 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
325 }
326 #endif
327
328 return;
329 }
330
331 /* 32-bit inferior registers need to be zero-extended.
332 Callers would read uninitialized memory otherwise. */
333 memset (buf, 0x00, X86_64_USER_REGS * 8);
334 #endif
335
336 for (i = 0; i < I386_NUM_REGS; i++)
337 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
338
339 collect_register_by_name (regcache, "orig_eax",
340 ((char *) buf) + ORIG_EAX * REGSIZE);
341 }
342
343 static void
344 x86_store_gregset (struct regcache *regcache, const void *buf)
345 {
346 int i;
347
348 #ifdef __x86_64__
349 if (register_size (regcache->tdesc, 0) == 8)
350 {
351 for (i = 0; i < X86_64_NUM_REGS; i++)
352 if (x86_64_regmap[i] != -1)
353 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
354
355 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
356 {
357 unsigned long base;
358 int lwpid = lwpid_of (current_thread);
359
360 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
361 supply_register_by_name (regcache, "fs_base", &base);
362
363 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
364 supply_register_by_name (regcache, "gs_base", &base);
365 }
366 #endif
367 return;
368 }
369 #endif
370
371 for (i = 0; i < I386_NUM_REGS; i++)
372 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
373
374 supply_register_by_name (regcache, "orig_eax",
375 ((char *) buf) + ORIG_EAX * REGSIZE);
376 }
377
378 static void
379 x86_fill_fpregset (struct regcache *regcache, void *buf)
380 {
381 #ifdef __x86_64__
382 i387_cache_to_fxsave (regcache, buf);
383 #else
384 i387_cache_to_fsave (regcache, buf);
385 #endif
386 }
387
388 static void
389 x86_store_fpregset (struct regcache *regcache, const void *buf)
390 {
391 #ifdef __x86_64__
392 i387_fxsave_to_cache (regcache, buf);
393 #else
394 i387_fsave_to_cache (regcache, buf);
395 #endif
396 }
397
398 #ifndef __x86_64__
399
400 static void
401 x86_fill_fpxregset (struct regcache *regcache, void *buf)
402 {
403 i387_cache_to_fxsave (regcache, buf);
404 }
405
406 static void
407 x86_store_fpxregset (struct regcache *regcache, const void *buf)
408 {
409 i387_fxsave_to_cache (regcache, buf);
410 }
411
412 #endif
413
414 static void
415 x86_fill_xstateregset (struct regcache *regcache, void *buf)
416 {
417 i387_cache_to_xsave (regcache, buf);
418 }
419
420 static void
421 x86_store_xstateregset (struct regcache *regcache, const void *buf)
422 {
423 i387_xsave_to_cache (regcache, buf);
424 }
425
426 /* ??? The non-biarch i386 case stores all the i387 regs twice.
427 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
428 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
429 doesn't work. It would be nice to avoid the duplication in the case
430 where it does work. Maybe the arch_setup routine could check whether
431 it works and update the supported regsets accordingly. */
432
433 static struct regset_info x86_regsets[] =
434 {
435 #ifdef HAVE_PTRACE_GETREGS
436 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
437 GENERAL_REGS,
438 x86_fill_gregset, x86_store_gregset },
439 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
440 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
441 # ifndef __x86_64__
442 # ifdef HAVE_PTRACE_GETFPXREGS
443 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
444 EXTENDED_REGS,
445 x86_fill_fpxregset, x86_store_fpxregset },
446 # endif
447 # endif
448 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
449 FP_REGS,
450 x86_fill_fpregset, x86_store_fpregset },
451 #endif /* HAVE_PTRACE_GETREGS */
452 NULL_REGSET
453 };
454
455 static CORE_ADDR
456 x86_get_pc (struct regcache *regcache)
457 {
458 int use_64bit = register_size (regcache->tdesc, 0) == 8;
459
460 if (use_64bit)
461 {
462 uint64_t pc;
463
464 collect_register_by_name (regcache, "rip", &pc);
465 return (CORE_ADDR) pc;
466 }
467 else
468 {
469 uint32_t pc;
470
471 collect_register_by_name (regcache, "eip", &pc);
472 return (CORE_ADDR) pc;
473 }
474 }
475
476 static void
477 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
478 {
479 int use_64bit = register_size (regcache->tdesc, 0) == 8;
480
481 if (use_64bit)
482 {
483 uint64_t newpc = pc;
484
485 supply_register_by_name (regcache, "rip", &newpc);
486 }
487 else
488 {
489 uint32_t newpc = pc;
490
491 supply_register_by_name (regcache, "eip", &newpc);
492 }
493 }
494 \f
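/* The single byte 0xCC is the x86 "int3" software breakpoint instruction.  */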
495 static const gdb_byte x86_breakpoint[] = { 0xCC };
496 #define x86_breakpoint_len 1
497
498 static int
499 x86_breakpoint_at (CORE_ADDR pc)
500 {
501 unsigned char c;
502
503 (*the_target->read_memory) (pc, &c, 1);
504 if (c == 0xCC)
505 return 1;
506
507 return 0;
508 }
509 \f
510 /* Low-level function vector. */
511 struct x86_dr_low_type x86_dr_low =
512 {
513 x86_linux_dr_set_control,
514 x86_linux_dr_set_addr,
515 x86_linux_dr_get_addr,
516 x86_linux_dr_get_status,
517 x86_linux_dr_get_control,
518 sizeof (void *),
519 };
520 \f
521 /* Breakpoint/Watchpoint support. */
522
523 static int
524 x86_supports_z_point_type (char z_type)
525 {
526 switch (z_type)
527 {
528 case Z_PACKET_SW_BP:
529 case Z_PACKET_HW_BP:
530 case Z_PACKET_WRITE_WP:
531 case Z_PACKET_ACCESS_WP:
532 return 1;
533 default:
534 return 0;
535 }
536 }
537
538 static int
539 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
540 int size, struct raw_breakpoint *bp)
541 {
542 struct process_info *proc = current_process ();
543
544 switch (type)
545 {
546 case raw_bkpt_type_hw:
547 case raw_bkpt_type_write_wp:
548 case raw_bkpt_type_access_wp:
549 {
550 enum target_hw_bp_type hw_type
551 = raw_bkpt_type_to_target_hw_bp_type (type);
552 struct x86_debug_reg_state *state
553 = &proc->priv->arch_private->debug_reg_state;
554
555 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
556 }
557
558 default:
559 /* Unsupported. */
560 return 1;
561 }
562 }
563
564 static int
565 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
566 int size, struct raw_breakpoint *bp)
567 {
568 struct process_info *proc = current_process ();
569
570 switch (type)
571 {
572 case raw_bkpt_type_hw:
573 case raw_bkpt_type_write_wp:
574 case raw_bkpt_type_access_wp:
575 {
576 enum target_hw_bp_type hw_type
577 = raw_bkpt_type_to_target_hw_bp_type (type);
578 struct x86_debug_reg_state *state
579 = &proc->priv->arch_private->debug_reg_state;
580
581 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
582 }
583 default:
584 /* Unsupported. */
585 return 1;
586 }
587 }
588
589 static int
590 x86_stopped_by_watchpoint (void)
591 {
592 struct process_info *proc = current_process ();
593 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
594 }
595
596 static CORE_ADDR
597 x86_stopped_data_address (void)
598 {
599 struct process_info *proc = current_process ();
600 CORE_ADDR addr;
601 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
602 &addr))
603 return addr;
604 return 0;
605 }
606 \f
607 /* Called when a new process is created. */
608
609 static struct arch_process_info *
610 x86_linux_new_process (void)
611 {
612 struct arch_process_info *info = XCNEW (struct arch_process_info);
613
614 x86_low_init_dregs (&info->debug_reg_state);
615
616 return info;
617 }
618
619 /* Target routine for linux_new_fork. */
620
621 static void
622 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
623 {
624 /* These are allocated by linux_add_process. */
625 gdb_assert (parent->priv != NULL
626 && parent->priv->arch_private != NULL);
627 gdb_assert (child->priv != NULL
628 && child->priv->arch_private != NULL);
629
630 /* Linux kernel before 2.6.33 commit
631 72f674d203cd230426437cdcf7dd6f681dad8b0d
632 will inherit hardware debug registers from parent
633 on fork/vfork/clone. Newer Linux kernels create such tasks with
634 zeroed debug registers.
635
636 GDB core assumes the child inherits the watchpoints/hw
637 breakpoints of the parent, and will remove them all from the
638 forked off process. Copy the debug registers mirrors into the
639 new process so that all breakpoints and watchpoints can be
640 removed together. The debug registers mirror will become zeroed
641 in the end before detaching the forked off process, thus making
642 this compatible with older Linux kernels too. */
643
644 *child->priv->arch_private = *parent->priv->arch_private;
645 }
646
647 /* See nat/x86-dregs.h. */
648
649 struct x86_debug_reg_state *
650 x86_debug_reg_state (pid_t pid)
651 {
652 struct process_info *proc = find_process_pid (pid);
653
654 return &proc->priv->arch_private->debug_reg_state;
655 }
656 \f
657 /* When GDBSERVER is built as a 64-bit application on linux, the
658 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
659 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
660 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
661 conversion in-place ourselves. */
662
663 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
664 layout of the inferiors' architecture. Returns true if any
665 conversion was done; false otherwise. If DIRECTION is 1, then copy
666 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
667 INF. */
668
669 static int
670 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
671 {
672 #ifdef __x86_64__
673 unsigned int machine;
674 int tid = lwpid_of (current_thread);
675 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
676
677 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
678 if (!is_64bit_tdesc ())
679 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
680 FIXUP_32);
681 /* No fixup for native x32 GDB. */
682 else if (!is_elf64 && sizeof (void *) == 8)
683 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
684 FIXUP_X32);
685 #endif
686
687 return 0;
688 }
689 \f
690 static int use_xml;
691
692 /* Format of XSAVE extended state is:
693 struct
694 {
695 fxsave_bytes[0..463]
696 sw_usable_bytes[464..511]
697 xstate_hdr_bytes[512..575]
698 avx_bytes[576..831]
699 future_state etc
700 };
701
702 The same memory layout is used for the coredump NT_X86_XSTATE note
703 representing the XSAVE extended state registers.
704
705 The first 8 bytes of sw_usable_bytes, at offsets [464..471], are the
706 OS-enabled extended state mask, which is the same as the extended
707 control register 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can
708 use this mask together with the mask saved in the xstate_hdr_bytes to
709 determine what states the processor/OS supports and what state, used
710 or initialized, the process/thread is in. */
711 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
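/* As an illustrative sketch only (not used directly by the code below):
   given a raw XSAVE buffer BUF filled in by PTRACE_GETREGSET with
   NT_X86_XSTATE, the enabled-features mask can be read as

     uint64_t xcr0 = *(uint64_t *) (buf + I386_LINUX_XSAVE_XCR0_OFFSET);

   x86_linux_read_description below does the equivalent by indexing a
   uint64_t array at I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t).  */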
712
713 /* Does the current host support the GETFPXREGS request? The header
714 file may or may not define it, and even if it is defined, the
715 kernel will return EIO if it's running on a pre-SSE processor. */
716 int have_ptrace_getfpxregs =
717 #ifdef HAVE_PTRACE_GETFPXREGS
718 -1
719 #else
720 0
721 #endif
722 ;
723
724 /* Get Linux/x86 target description from running target. */
725
726 static const struct target_desc *
727 x86_linux_read_description (void)
728 {
729 unsigned int machine;
730 int is_elf64;
731 int xcr0_features;
732 int tid;
733 static uint64_t xcr0;
734 struct regset_info *regset;
735
736 tid = lwpid_of (current_thread);
737
738 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
739
740 if (sizeof (void *) == 4)
741 {
742 if (is_elf64 > 0)
743 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
744 #ifndef __x86_64__
745 else if (machine == EM_X86_64)
746 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
747 #endif
748 }
749
750 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
751 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
752 {
753 elf_fpxregset_t fpxregs;
754
755 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
756 {
757 have_ptrace_getfpxregs = 0;
758 have_ptrace_getregset = 0;
759 return tdesc_i386_mmx_linux;
760 }
761 else
762 have_ptrace_getfpxregs = 1;
763 }
764 #endif
765
766 if (!use_xml)
767 {
768 x86_xcr0 = X86_XSTATE_SSE_MASK;
769
770 /* Don't use XML. */
771 #ifdef __x86_64__
772 if (machine == EM_X86_64)
773 return tdesc_amd64_linux_no_xml;
774 else
775 #endif
776 return tdesc_i386_linux_no_xml;
777 }
778
779 if (have_ptrace_getregset == -1)
780 {
781 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
782 struct iovec iov;
783
784 iov.iov_base = xstateregs;
785 iov.iov_len = sizeof (xstateregs);
786
787 /* Check if PTRACE_GETREGSET works. */
788 if (ptrace (PTRACE_GETREGSET, tid,
789 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
790 have_ptrace_getregset = 0;
791 else
792 {
793 have_ptrace_getregset = 1;
794
795 /* Get XCR0 from XSAVE extended state. */
796 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
797 / sizeof (uint64_t))];
798
799 /* Use PTRACE_GETREGSET if it is available. */
800 for (regset = x86_regsets;
801 regset->fill_function != NULL; regset++)
802 if (regset->get_request == PTRACE_GETREGSET)
803 regset->size = X86_XSTATE_SIZE (xcr0);
804 else if (regset->type != GENERAL_REGS)
805 regset->size = 0;
806 }
807 }
808
809 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
810 xcr0_features = (have_ptrace_getregset
811 && (xcr0 & X86_XSTATE_ALL_MASK));
812
813 if (xcr0_features)
814 x86_xcr0 = xcr0;
815
816 if (machine == EM_X86_64)
817 {
818 #ifdef __x86_64__
819 if (is_elf64)
820 {
821 if (xcr0_features)
822 {
823 switch (xcr0 & X86_XSTATE_ALL_MASK)
824 {
825 case X86_XSTATE_AVX_MPX_AVX512_MASK:
826 return tdesc_amd64_avx_mpx_avx512_linux;
827
828 case X86_XSTATE_AVX_AVX512_MASK:
829 return tdesc_amd64_avx_avx512_linux;
830
831 case X86_XSTATE_AVX_MPX_MASK:
832 return tdesc_amd64_avx_mpx_linux;
833
834 case X86_XSTATE_MPX_MASK:
835 return tdesc_amd64_mpx_linux;
836
837 case X86_XSTATE_AVX_MASK:
838 return tdesc_amd64_avx_linux;
839
840 default:
841 return tdesc_amd64_linux;
842 }
843 }
844 else
845 return tdesc_amd64_linux;
846 }
847 else
848 {
849 if (xcr0_features)
850 {
851 switch (xcr0 & X86_XSTATE_ALL_MASK)
852 {
853 case X86_XSTATE_AVX_MPX_AVX512_MASK: /* No MPX on x32. */
854 case X86_XSTATE_AVX_AVX512_MASK:
855 return tdesc_x32_avx_avx512_linux;
856
857 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
858 case X86_XSTATE_AVX_MASK:
859 return tdesc_x32_avx_linux;
860
861 default:
862 return tdesc_x32_linux;
863 }
864 }
865 else
866 return tdesc_x32_linux;
867 }
868 #endif
869 }
870 else
871 {
872 if (xcr0_features)
873 {
874 switch (xcr0 & X86_XSTATE_ALL_MASK)
875 {
876 case (X86_XSTATE_AVX_MPX_AVX512_MASK):
877 return tdesc_i386_avx_mpx_avx512_linux;
878
879 case (X86_XSTATE_AVX_AVX512_MASK):
880 return tdesc_i386_avx_avx512_linux;
881
882 case (X86_XSTATE_MPX_MASK):
883 return tdesc_i386_mpx_linux;
884
885 case (X86_XSTATE_AVX_MPX_MASK):
886 return tdesc_i386_avx_mpx_linux;
887
888 case (X86_XSTATE_AVX_MASK):
889 return tdesc_i386_avx_linux;
890
891 default:
892 return tdesc_i386_linux;
893 }
894 }
895 else
896 return tdesc_i386_linux;
897 }
898
899 gdb_assert_not_reached ("failed to return tdesc");
900 }
901
902 /* Callback for find_inferior. Stops iteration when a thread with a
903 given PID is found. */
904
905 static int
906 same_process_callback (struct inferior_list_entry *entry, void *data)
907 {
908 int pid = *(int *) data;
909
910 return (ptid_get_pid (entry->id) == pid);
911 }
912
913 /* Callback for for_each_inferior. Calls the arch_setup routine for
914 each process. */
915
916 static void
917 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
918 {
919 int pid = ptid_get_pid (entry->id);
920
921 /* Look up any thread of this process. */
922 current_thread
923 = (struct thread_info *) find_inferior (&all_threads,
924 same_process_callback, &pid);
925
926 the_low_target.arch_setup ();
927 }
928
929 /* Update the target description of all processes; a new GDB has
930 connected, and it may or may not support XML target descriptions. */
931
932 static void
933 x86_linux_update_xmltarget (void)
934 {
935 struct thread_info *saved_thread = current_thread;
936
937 /* Before changing the register cache's internal layout, flush the
938 contents of the current valid caches back to the threads, and
939 release the current regcache objects. */
940 regcache_release ();
941
942 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
943
944 current_thread = saved_thread;
945 }
946
947 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
948 PTRACE_GETREGSET. */
949
950 static void
951 x86_linux_process_qsupported (char **features, int count)
952 {
953 int i;
954
955 /* Assume GDB doesn't support XML unless told otherwise. If GDB sends
956 "xmlRegisters=" with "i386" in the qSupported query, it supports x86
957 XML target descriptions. */
958 use_xml = 0;
959 for (i = 0; i < count; i++)
960 {
961 const char *feature = features[i];
962
963 if (startswith (feature, "xmlRegisters="))
964 {
965 char *copy = xstrdup (feature + 13);
966 char *p;
967
968 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
969 {
970 if (strcmp (p, "i386") == 0)
971 {
972 use_xml = 1;
973 break;
974 }
975 }
976
977 free (copy);
978 }
979 }
980 x86_linux_update_xmltarget ();
981 }
982
983 /* Common for x86/x86-64. */
984
985 static struct regsets_info x86_regsets_info =
986 {
987 x86_regsets, /* regsets */
988 0, /* num_regsets */
989 NULL, /* disabled_regsets */
990 };
991
992 #ifdef __x86_64__
993 static struct regs_info amd64_linux_regs_info =
994 {
995 NULL, /* regset_bitmap */
996 NULL, /* usrregs_info */
997 &x86_regsets_info
998 };
999 #endif
1000 static struct usrregs_info i386_linux_usrregs_info =
1001 {
1002 I386_NUM_REGS,
1003 i386_regmap,
1004 };
1005
1006 static struct regs_info i386_linux_regs_info =
1007 {
1008 NULL, /* regset_bitmap */
1009 &i386_linux_usrregs_info,
1010 &x86_regsets_info
1011 };
1012
1013 const struct regs_info *
1014 x86_linux_regs_info (void)
1015 {
1016 #ifdef __x86_64__
1017 if (is_64bit_tdesc ())
1018 return &amd64_linux_regs_info;
1019 else
1020 #endif
1021 return &i386_linux_regs_info;
1022 }
1023
1024 /* Initialize the target description for the architecture of the
1025 inferior. */
1026
1027 static void
1028 x86_arch_setup (void)
1029 {
1030 current_process ()->tdesc = x86_linux_read_description ();
1031 }
1032
1033 /* Fill *SYSNO with the syscall number the inferior trapped on. This
1034 should only be called if LWP got a SYSCALL_SIGTRAP. */
1035
1036 static void
1037 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
1038 {
1039 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1040
1041 if (use_64bit)
1042 {
1043 long l_sysno;
1044
1045 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1046 *sysno = (int) l_sysno;
1047 }
1048 else
1049 collect_register_by_name (regcache, "orig_eax", sysno);
1050 }
1051
1052 static int
1053 x86_supports_tracepoints (void)
1054 {
1055 return 1;
1056 }
1057
1058 static void
1059 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1060 {
1061 write_inferior_memory (*to, buf, len);
1062 *to += len;
1063 }
1064
1065 static int
1066 push_opcode (unsigned char *buf, char *op)
1067 {
1068 unsigned char *buf_org = buf;
1069
1070 while (1)
1071 {
1072 char *endptr;
1073 unsigned long ul = strtoul (op, &endptr, 16);
1074
1075 if (endptr == op)
1076 break;
1077
1078 *buf++ = ul;
1079 op = endptr;
1080 }
1081
1082 return buf - buf_org;
1083 }
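/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF and returns 3.  */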
1084
1085 #ifdef __x86_64__
1086
1087 /* Build a jump pad that saves registers and calls a collection
1088 function. Writes into JJUMPAD_INSN the jump instruction that
1089 branches to the jump pad. The caller is responsible for writing
1090 it in at the tracepoint address. */
1091
1092 static int
1093 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1094 CORE_ADDR collector,
1095 CORE_ADDR lockaddr,
1096 ULONGEST orig_size,
1097 CORE_ADDR *jump_entry,
1098 CORE_ADDR *trampoline,
1099 ULONGEST *trampoline_size,
1100 unsigned char *jjump_pad_insn,
1101 ULONGEST *jjump_pad_insn_size,
1102 CORE_ADDR *adjusted_insn_addr,
1103 CORE_ADDR *adjusted_insn_addr_end,
1104 char *err)
1105 {
1106 unsigned char buf[40];
1107 int i, offset;
1108 int64_t loffset;
1109
1110 CORE_ADDR buildaddr = *jump_entry;
1111
1112 /* Build the jump pad. */
1113
1114 /* First, do tracepoint data collection. Save registers. */
1115 i = 0;
1116 /* Need to ensure stack pointer saved first. */
1117 buf[i++] = 0x54; /* push %rsp */
1118 buf[i++] = 0x55; /* push %rbp */
1119 buf[i++] = 0x57; /* push %rdi */
1120 buf[i++] = 0x56; /* push %rsi */
1121 buf[i++] = 0x52; /* push %rdx */
1122 buf[i++] = 0x51; /* push %rcx */
1123 buf[i++] = 0x53; /* push %rbx */
1124 buf[i++] = 0x50; /* push %rax */
1125 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1126 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1127 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1128 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1129 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1130 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1131 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1132 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1133 buf[i++] = 0x9c; /* pushfq */
1134 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1135 buf[i++] = 0xbf;
1136 memcpy (buf + i, &tpaddr, 8);
1137 i += 8;
1138 buf[i++] = 0x57; /* push %rdi */
1139 append_insns (&buildaddr, i, buf);
1140
1141 /* Stack space for the collecting_t object. */
1142 i = 0;
1143 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1144 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1145 memcpy (buf + i, &tpoint, 8);
1146 i += 8;
1147 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1148 i += push_opcode (&buf[i],
1149 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1150 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1151 append_insns (&buildaddr, i, buf);
1152
1153 /* spin-lock. */
1154 i = 0;
1155 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1156 memcpy (&buf[i], (void *) &lockaddr, 8);
1157 i += 8;
1158 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1159 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1160 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1161 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1162 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1163 append_insns (&buildaddr, i, buf);
1164
1165 /* Set up the gdb_collect call. */
1166 /* At this point, (stack pointer + 0x18) is the base of our saved
1167 register block. */
1168
1169 i = 0;
1170 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1171 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1172
1173 /* tpoint address may be 64-bit wide. */
1174 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1175 memcpy (buf + i, &tpoint, 8);
1176 i += 8;
1177 append_insns (&buildaddr, i, buf);
1178
1179 /* The collector function, being in the shared library, may be more
1180 than 31 bits away from the jump pad. */
1181 i = 0;
1182 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1183 memcpy (buf + i, &collector, 8);
1184 i += 8;
1185 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1186 append_insns (&buildaddr, i, buf);
1187
1188 /* Clear the spin-lock. */
1189 i = 0;
1190 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1191 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1192 memcpy (buf + i, &lockaddr, 8);
1193 i += 8;
1194 append_insns (&buildaddr, i, buf);
1195
1196 /* Remove the stack space that had been used for the collecting_t object. */
1197 i = 0;
1198 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1199 append_insns (&buildaddr, i, buf);
1200
1201 /* Restore register state. */
1202 i = 0;
1203 buf[i++] = 0x48; /* add $0x8,%rsp */
1204 buf[i++] = 0x83;
1205 buf[i++] = 0xc4;
1206 buf[i++] = 0x08;
1207 buf[i++] = 0x9d; /* popfq */
1208 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1209 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1210 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1211 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1212 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1213 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1214 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1215 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1216 buf[i++] = 0x58; /* pop %rax */
1217 buf[i++] = 0x5b; /* pop %rbx */
1218 buf[i++] = 0x59; /* pop %rcx */
1219 buf[i++] = 0x5a; /* pop %rdx */
1220 buf[i++] = 0x5e; /* pop %rsi */
1221 buf[i++] = 0x5f; /* pop %rdi */
1222 buf[i++] = 0x5d; /* pop %rbp */
1223 buf[i++] = 0x5c; /* pop %rsp */
1224 append_insns (&buildaddr, i, buf);
1225
1226 /* Now, adjust the original instruction to execute in the jump
1227 pad. */
1228 *adjusted_insn_addr = buildaddr;
1229 relocate_instruction (&buildaddr, tpaddr);
1230 *adjusted_insn_addr_end = buildaddr;
1231
1232 /* Finally, write a jump back to the program. */
1233
1234 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1235 if (loffset > INT_MAX || loffset < INT_MIN)
1236 {
1237 sprintf (err,
1238 "E.Jump back from jump pad too far from tracepoint "
1239 "(offset 0x%" PRIx64 " > int32).", loffset);
1240 return 1;
1241 }
1242
1243 offset = (int) loffset;
1244 memcpy (buf, jump_insn, sizeof (jump_insn));
1245 memcpy (buf + 1, &offset, 4);
1246 append_insns (&buildaddr, sizeof (jump_insn), buf);
1247
1248 /* The jump pad is now built. Wire in a jump to our jump pad. This
1249 is always done last (by our caller actually), so that we can
1250 install fast tracepoints with threads running. This relies on
1251 the agent's atomic write support. */
1252 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1253 if (loffset > INT_MAX || loffset < INT_MIN)
1254 {
1255 sprintf (err,
1256 "E.Jump pad too far from tracepoint "
1257 "(offset 0x%" PRIx64 " > int32).", loffset);
1258 return 1;
1259 }
1260
1261 offset = (int) loffset;
1262
1263 memcpy (buf, jump_insn, sizeof (jump_insn));
1264 memcpy (buf + 1, &offset, 4);
1265 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1266 *jjump_pad_insn_size = sizeof (jump_insn);
1267
1268 /* Return the end address of our pad. */
1269 *jump_entry = buildaddr;
1270
1271 return 0;
1272 }
1273
1274 #endif /* __x86_64__ */
1275
1276 /* Build a jump pad that saves registers and calls a collection
1277 function. Writes into JJUMPAD_INSN the jump instruction that
1278 branches to the jump pad. The caller is responsible for writing
1279 it in at the tracepoint address. */
1280
1281 static int
1282 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1283 CORE_ADDR collector,
1284 CORE_ADDR lockaddr,
1285 ULONGEST orig_size,
1286 CORE_ADDR *jump_entry,
1287 CORE_ADDR *trampoline,
1288 ULONGEST *trampoline_size,
1289 unsigned char *jjump_pad_insn,
1290 ULONGEST *jjump_pad_insn_size,
1291 CORE_ADDR *adjusted_insn_addr,
1292 CORE_ADDR *adjusted_insn_addr_end,
1293 char *err)
1294 {
1295 unsigned char buf[0x100];
1296 int i, offset;
1297 CORE_ADDR buildaddr = *jump_entry;
1298
1299 /* Build the jump pad. */
1300
1301 /* First, do tracepoint data collection. Save registers. */
1302 i = 0;
1303 buf[i++] = 0x60; /* pushad */
1304 buf[i++] = 0x68; /* push tpaddr aka $pc */
1305 *((int *)(buf + i)) = (int) tpaddr;
1306 i += 4;
1307 buf[i++] = 0x9c; /* pushf */
1308 buf[i++] = 0x1e; /* push %ds */
1309 buf[i++] = 0x06; /* push %es */
1310 buf[i++] = 0x0f; /* push %fs */
1311 buf[i++] = 0xa0;
1312 buf[i++] = 0x0f; /* push %gs */
1313 buf[i++] = 0xa8;
1314 buf[i++] = 0x16; /* push %ss */
1315 buf[i++] = 0x0e; /* push %cs */
1316 append_insns (&buildaddr, i, buf);
1317
1318 /* Stack space for the collecting_t object. */
1319 i = 0;
1320 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1321
1322 /* Build the object. */
1323 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1324 memcpy (buf + i, &tpoint, 4);
1325 i += 4;
1326 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1327
1328 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1329 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1330 append_insns (&buildaddr, i, buf);
1331
1332 /* spin-lock. Note this uses cmpxchg, which is not available on the
1333 original i386. If we cared about that, this could use xchg instead. */
1334
1335 i = 0;
1336 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1337 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1338 %esp,<lockaddr> */
1339 memcpy (&buf[i], (void *) &lockaddr, 4);
1340 i += 4;
1341 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1342 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1343 append_insns (&buildaddr, i, buf);
1344
1345
1346 /* Set up arguments to the gdb_collect call. */
1347 i = 0;
1348 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1349 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1350 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1351 append_insns (&buildaddr, i, buf);
1352
1353 i = 0;
1354 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1355 append_insns (&buildaddr, i, buf);
1356
1357 i = 0;
1358 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1359 memcpy (&buf[i], (void *) &tpoint, 4);
1360 i += 4;
1361 append_insns (&buildaddr, i, buf);
1362
1363 buf[0] = 0xe8; /* call <reladdr> */
1364 offset = collector - (buildaddr + sizeof (jump_insn));
1365 memcpy (buf + 1, &offset, 4);
1366 append_insns (&buildaddr, 5, buf);
1367 /* Clean up after the call. */
1368 buf[0] = 0x83; /* add $0x8,%esp */
1369 buf[1] = 0xc4;
1370 buf[2] = 0x08;
1371 append_insns (&buildaddr, 3, buf);
1372
1373
1374 /* Clear the spin-lock. This would need the LOCK prefix on older
1375 broken archs. */
1376 i = 0;
1377 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1378 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1379 memcpy (buf + i, &lockaddr, 4);
1380 i += 4;
1381 append_insns (&buildaddr, i, buf);
1382
1383
1384 /* Remove the stack space that had been used for the collecting_t object. */
1385 i = 0;
1386 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1387 append_insns (&buildaddr, i, buf);
1388
1389 i = 0;
1390 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1391 buf[i++] = 0xc4;
1392 buf[i++] = 0x04;
1393 buf[i++] = 0x17; /* pop %ss */
1394 buf[i++] = 0x0f; /* pop %gs */
1395 buf[i++] = 0xa9;
1396 buf[i++] = 0x0f; /* pop %fs */
1397 buf[i++] = 0xa1;
1398 buf[i++] = 0x07; /* pop %es */
1399 buf[i++] = 0x1f; /* pop %ds */
1400 buf[i++] = 0x9d; /* popf */
1401 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1402 buf[i++] = 0xc4;
1403 buf[i++] = 0x04;
1404 buf[i++] = 0x61; /* popad */
1405 append_insns (&buildaddr, i, buf);
1406
1407 /* Now, adjust the original instruction to execute in the jump
1408 pad. */
1409 *adjusted_insn_addr = buildaddr;
1410 relocate_instruction (&buildaddr, tpaddr);
1411 *adjusted_insn_addr_end = buildaddr;
1412
1413 /* Write the jump back to the program. */
1414 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1415 memcpy (buf, jump_insn, sizeof (jump_insn));
1416 memcpy (buf + 1, &offset, 4);
1417 append_insns (&buildaddr, sizeof (jump_insn), buf);
1418
1419 /* The jump pad is now built. Wire in a jump to our jump pad. This
1420 is always done last (by our caller actually), so that we can
1421 install fast tracepoints with threads running. This relies on
1422 the agent's atomic write support. */
1423 if (orig_size == 4)
1424 {
1425 /* Create a trampoline. */
1426 *trampoline_size = sizeof (jump_insn);
1427 if (!claim_trampoline_space (*trampoline_size, trampoline))
1428 {
1429 /* No trampoline space available. */
1430 strcpy (err,
1431 "E.Cannot allocate trampoline space needed for fast "
1432 "tracepoints on 4-byte instructions.");
1433 return 1;
1434 }
1435
1436 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1437 memcpy (buf, jump_insn, sizeof (jump_insn));
1438 memcpy (buf + 1, &offset, 4);
1439 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1440
1441 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1442 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1443 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1444 memcpy (buf + 2, &offset, 2);
1445 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1446 *jjump_pad_insn_size = sizeof (small_jump_insn);
1447 }
1448 else
1449 {
1450 /* Else use a 32-bit relative jump instruction. */
1451 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1452 memcpy (buf, jump_insn, sizeof (jump_insn));
1453 memcpy (buf + 1, &offset, 4);
1454 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1455 *jjump_pad_insn_size = sizeof (jump_insn);
1456 }
1457
1458 /* Return the end address of our pad. */
1459 *jump_entry = buildaddr;
1460
1461 return 0;
1462 }
1463
1464 static int
1465 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1466 CORE_ADDR collector,
1467 CORE_ADDR lockaddr,
1468 ULONGEST orig_size,
1469 CORE_ADDR *jump_entry,
1470 CORE_ADDR *trampoline,
1471 ULONGEST *trampoline_size,
1472 unsigned char *jjump_pad_insn,
1473 ULONGEST *jjump_pad_insn_size,
1474 CORE_ADDR *adjusted_insn_addr,
1475 CORE_ADDR *adjusted_insn_addr_end,
1476 char *err)
1477 {
1478 #ifdef __x86_64__
1479 if (is_64bit_tdesc ())
1480 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1481 collector, lockaddr,
1482 orig_size, jump_entry,
1483 trampoline, trampoline_size,
1484 jjump_pad_insn,
1485 jjump_pad_insn_size,
1486 adjusted_insn_addr,
1487 adjusted_insn_addr_end,
1488 err);
1489 #endif
1490
1491 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1492 collector, lockaddr,
1493 orig_size, jump_entry,
1494 trampoline, trampoline_size,
1495 jjump_pad_insn,
1496 jjump_pad_insn_size,
1497 adjusted_insn_addr,
1498 adjusted_insn_addr_end,
1499 err);
1500 }
1501
1502 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1503 architectures. */
1504
1505 static int
1506 x86_get_min_fast_tracepoint_insn_len (void)
1507 {
1508 static int warned_about_fast_tracepoints = 0;
1509
1510 #ifdef __x86_64__
1511 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1512 used for fast tracepoints. */
1513 if (is_64bit_tdesc ())
1514 return 5;
1515 #endif
1516
1517 if (agent_loaded_p ())
1518 {
1519 char errbuf[IPA_BUFSIZ];
1520
1521 errbuf[0] = '\0';
1522
1523 /* On x86, if trampolines are available, then 4-byte jump instructions
1524 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1525 with a 4-byte offset are used instead. */
1526 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1527 return 4;
1528 else
1529 {
1530 /* GDB has no channel to explain to the user why a shorter fast
1531 tracepoint is not possible, but at least make GDBserver
1532 mention that something has gone awry. */
1533 if (!warned_about_fast_tracepoints)
1534 {
1535 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1536 warned_about_fast_tracepoints = 1;
1537 }
1538 return 5;
1539 }
1540 }
1541 else
1542 {
1543 /* Indicate that the minimum length is currently unknown since the IPA
1544 has not loaded yet. */
1545 return 0;
1546 }
1547 }
1548
1549 static void
1550 add_insns (unsigned char *start, int len)
1551 {
1552 CORE_ADDR buildaddr = current_insn_ptr;
1553
1554 if (debug_threads)
1555 debug_printf ("Adding %d bytes of insn at %s\n",
1556 len, paddress (buildaddr));
1557
1558 append_insns (&buildaddr, len, start);
1559 current_insn_ptr = buildaddr;
1560 }
1561
1562 /* Our general strategy for emitting code is to avoid specifying raw
1563 bytes whenever possible, and instead copy a block of inline asm
1564 that is embedded in the function. This is a little messy, because
1565 we need to keep the compiler from discarding what looks like dead
1566 code, plus suppress various warnings. */
1567
1568 #define EMIT_ASM(NAME, INSNS) \
1569 do \
1570 { \
1571 extern unsigned char start_ ## NAME, end_ ## NAME; \
1572 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1573 __asm__ ("jmp end_" #NAME "\n" \
1574 "\t" "start_" #NAME ":" \
1575 "\t" INSNS "\n" \
1576 "\t" "end_" #NAME ":"); \
1577 } while (0)
1578
1579 #ifdef __x86_64__
1580
1581 #define EMIT_ASM32(NAME,INSNS) \
1582 do \
1583 { \
1584 extern unsigned char start_ ## NAME, end_ ## NAME; \
1585 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1586 __asm__ (".code32\n" \
1587 "\t" "jmp end_" #NAME "\n" \
1588 "\t" "start_" #NAME ":\n" \
1589 "\t" INSNS "\n" \
1590 "\t" "end_" #NAME ":\n" \
1591 ".code64\n"); \
1592 } while (0)
1593
1594 #else
1595
1596 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1597
1598 #endif
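/* As a sketch of how these macros are used: EMIT_ASM (amd64_pop, "pop %rax")
   assembles that instruction in place, bracketed by the local labels
   start_amd64_pop and end_amd64_pop, and then copies the bytes between the
   labels into the inferior at current_insn_ptr via add_insns.  */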
1599
1600 #ifdef __x86_64__
1601
1602 static void
1603 amd64_emit_prologue (void)
1604 {
1605 EMIT_ASM (amd64_prologue,
1606 "pushq %rbp\n\t"
1607 "movq %rsp,%rbp\n\t"
1608 "sub $0x20,%rsp\n\t"
1609 "movq %rdi,-8(%rbp)\n\t"
1610 "movq %rsi,-16(%rbp)");
1611 }
1612
1613
1614 static void
1615 amd64_emit_epilogue (void)
1616 {
1617 EMIT_ASM (amd64_epilogue,
1618 "movq -16(%rbp),%rdi\n\t"
1619 "movq %rax,(%rdi)\n\t"
1620 "xor %rax,%rax\n\t"
1621 "leave\n\t"
1622 "ret");
1623 }
1624
1625 static void
1626 amd64_emit_add (void)
1627 {
1628 EMIT_ASM (amd64_add,
1629 "add (%rsp),%rax\n\t"
1630 "lea 0x8(%rsp),%rsp");
1631 }
1632
1633 static void
1634 amd64_emit_sub (void)
1635 {
1636 EMIT_ASM (amd64_sub,
1637 "sub %rax,(%rsp)\n\t"
1638 "pop %rax");
1639 }
1640
1641 static void
1642 amd64_emit_mul (void)
1643 {
1644 emit_error = 1;
1645 }
1646
1647 static void
1648 amd64_emit_lsh (void)
1649 {
1650 emit_error = 1;
1651 }
1652
1653 static void
1654 amd64_emit_rsh_signed (void)
1655 {
1656 emit_error = 1;
1657 }
1658
1659 static void
1660 amd64_emit_rsh_unsigned (void)
1661 {
1662 emit_error = 1;
1663 }
1664
1665 static void
1666 amd64_emit_ext (int arg)
1667 {
1668 switch (arg)
1669 {
1670 case 8:
1671 EMIT_ASM (amd64_ext_8,
1672 "cbtw\n\t"
1673 "cwtl\n\t"
1674 "cltq");
1675 break;
1676 case 16:
1677 EMIT_ASM (amd64_ext_16,
1678 "cwtl\n\t"
1679 "cltq");
1680 break;
1681 case 32:
1682 EMIT_ASM (amd64_ext_32,
1683 "cltq");
1684 break;
1685 default:
1686 emit_error = 1;
1687 }
1688 }
1689
1690 static void
1691 amd64_emit_log_not (void)
1692 {
1693 EMIT_ASM (amd64_log_not,
1694 "test %rax,%rax\n\t"
1695 "sete %cl\n\t"
1696 "movzbq %cl,%rax");
1697 }
1698
1699 static void
1700 amd64_emit_bit_and (void)
1701 {
1702 EMIT_ASM (amd64_and,
1703 "and (%rsp),%rax\n\t"
1704 "lea 0x8(%rsp),%rsp");
1705 }
1706
1707 static void
1708 amd64_emit_bit_or (void)
1709 {
1710 EMIT_ASM (amd64_or,
1711 "or (%rsp),%rax\n\t"
1712 "lea 0x8(%rsp),%rsp");
1713 }
1714
1715 static void
1716 amd64_emit_bit_xor (void)
1717 {
1718 EMIT_ASM (amd64_xor,
1719 "xor (%rsp),%rax\n\t"
1720 "lea 0x8(%rsp),%rsp");
1721 }
1722
1723 static void
1724 amd64_emit_bit_not (void)
1725 {
1726 EMIT_ASM (amd64_bit_not,
1727 "xorq $0xffffffffffffffff,%rax");
1728 }
1729
1730 static void
1731 amd64_emit_equal (void)
1732 {
1733 EMIT_ASM (amd64_equal,
1734 "cmp %rax,(%rsp)\n\t"
1735 "je .Lamd64_equal_true\n\t"
1736 "xor %rax,%rax\n\t"
1737 "jmp .Lamd64_equal_end\n\t"
1738 ".Lamd64_equal_true:\n\t"
1739 "mov $0x1,%rax\n\t"
1740 ".Lamd64_equal_end:\n\t"
1741 "lea 0x8(%rsp),%rsp");
1742 }
1743
1744 static void
1745 amd64_emit_less_signed (void)
1746 {
1747 EMIT_ASM (amd64_less_signed,
1748 "cmp %rax,(%rsp)\n\t"
1749 "jl .Lamd64_less_signed_true\n\t"
1750 "xor %rax,%rax\n\t"
1751 "jmp .Lamd64_less_signed_end\n\t"
1752 ".Lamd64_less_signed_true:\n\t"
1753 "mov $1,%rax\n\t"
1754 ".Lamd64_less_signed_end:\n\t"
1755 "lea 0x8(%rsp),%rsp");
1756 }
1757
1758 static void
1759 amd64_emit_less_unsigned (void)
1760 {
1761 EMIT_ASM (amd64_less_unsigned,
1762 "cmp %rax,(%rsp)\n\t"
1763 "jb .Lamd64_less_unsigned_true\n\t"
1764 "xor %rax,%rax\n\t"
1765 "jmp .Lamd64_less_unsigned_end\n\t"
1766 ".Lamd64_less_unsigned_true:\n\t"
1767 "mov $1,%rax\n\t"
1768 ".Lamd64_less_unsigned_end:\n\t"
1769 "lea 0x8(%rsp),%rsp");
1770 }
1771
1772 static void
1773 amd64_emit_ref (int size)
1774 {
1775 switch (size)
1776 {
1777 case 1:
1778 EMIT_ASM (amd64_ref1,
1779 "movb (%rax),%al");
1780 break;
1781 case 2:
1782 EMIT_ASM (amd64_ref2,
1783 "movw (%rax),%ax");
1784 break;
1785 case 4:
1786 EMIT_ASM (amd64_ref4,
1787 "movl (%rax),%eax");
1788 break;
1789 case 8:
1790 EMIT_ASM (amd64_ref8,
1791 "movq (%rax),%rax");
1792 break;
1793 }
1794 }
1795
1796 static void
1797 amd64_emit_if_goto (int *offset_p, int *size_p)
1798 {
1799 EMIT_ASM (amd64_if_goto,
1800 "mov %rax,%rcx\n\t"
1801 "pop %rax\n\t"
1802 "cmp $0,%rcx\n\t"
1803 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1804 if (offset_p)
1805 *offset_p = 10;
1806 if (size_p)
1807 *size_p = 4;
1808 }
1809
1810 static void
1811 amd64_emit_goto (int *offset_p, int *size_p)
1812 {
1813 EMIT_ASM (amd64_goto,
1814 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1815 if (offset_p)
1816 *offset_p = 1;
1817 if (size_p)
1818 *size_p = 4;
1819 }
1820
1821 static void
1822 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1823 {
1824 int diff = (to - (from + size));
1825 unsigned char buf[sizeof (int)];
1826
1827 if (size != 4)
1828 {
1829 emit_error = 1;
1830 return;
1831 }
1832
1833 memcpy (buf, &diff, sizeof (int));
1834 write_inferior_memory (from, buf, sizeof (int));
1835 }
1836
1837 static void
1838 amd64_emit_const (LONGEST num)
1839 {
1840 unsigned char buf[16];
1841 int i;
1842 CORE_ADDR buildaddr = current_insn_ptr;
1843
1844 i = 0;
1845 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1846 memcpy (&buf[i], &num, sizeof (num));
1847 i += 8;
1848 append_insns (&buildaddr, i, buf);
1849 current_insn_ptr = buildaddr;
1850 }
1851
1852 static void
1853 amd64_emit_call (CORE_ADDR fn)
1854 {
1855 unsigned char buf[16];
1856 int i;
1857 CORE_ADDR buildaddr;
1858 LONGEST offset64;
1859
1860 /* The destination function, being in the shared library, may be more
1861 than 31 bits away from the compiled code pad. */
1862
1863 buildaddr = current_insn_ptr;
1864
1865 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1866
1867 i = 0;
1868
1869 if (offset64 > INT_MAX || offset64 < INT_MIN)
1870 {
1871 /* Offset is too large for a call. Use callq, but that requires
1872 a register, so avoid it if possible. Use %r10; since it is
1873 call-clobbered, we don't have to push/pop it. */
1874 buf[i++] = 0x48; /* mov $fn,%r10 */
1875 buf[i++] = 0xba;
1876 memcpy (buf + i, &fn, 8);
1877 i += 8;
1878 buf[i++] = 0xff; /* callq *%r10 */
1879 buf[i++] = 0xd2;
1880 }
1881 else
1882 {
1883 int offset32 = offset64; /* we know we can't overflow here. */
1884
1885 buf[i++] = 0xe8; /* call <reladdr> */
1886 memcpy (buf + i, &offset32, 4);
1887 i += 4;
1888 }
1889
1890 append_insns (&buildaddr, i, buf);
1891 current_insn_ptr = buildaddr;
1892 }
1893
1894 static void
1895 amd64_emit_reg (int reg)
1896 {
1897 unsigned char buf[16];
1898 int i;
1899 CORE_ADDR buildaddr;
1900
1901 /* Assume raw_regs is still in %rdi. */
1902 buildaddr = current_insn_ptr;
1903 i = 0;
1904 buf[i++] = 0xbe; /* mov $<n>,%esi */
1905 memcpy (&buf[i], &reg, sizeof (reg));
1906 i += 4;
1907 append_insns (&buildaddr, i, buf);
1908 current_insn_ptr = buildaddr;
1909 amd64_emit_call (get_raw_reg_func_addr ());
1910 }
1911
1912 static void
1913 amd64_emit_pop (void)
1914 {
1915 EMIT_ASM (amd64_pop,
1916 "pop %rax");
1917 }
1918
1919 static void
1920 amd64_emit_stack_flush (void)
1921 {
1922 EMIT_ASM (amd64_stack_flush,
1923 "push %rax");
1924 }
1925
1926 static void
1927 amd64_emit_zero_ext (int arg)
1928 {
1929 switch (arg)
1930 {
1931 case 8:
1932 EMIT_ASM (amd64_zero_ext_8,
1933 "and $0xff,%rax");
1934 break;
1935 case 16:
1936 EMIT_ASM (amd64_zero_ext_16,
1937 "and $0xffff,%rax");
1938 break;
1939 case 32:
1940 EMIT_ASM (amd64_zero_ext_32,
1941 "mov $0xffffffff,%rcx\n\t"
1942 "and %rcx,%rax");
1943 break;
1944 default:
1945 emit_error = 1;
1946 }
1947 }
1948
1949 static void
1950 amd64_emit_swap (void)
1951 {
1952 EMIT_ASM (amd64_swap,
1953 "mov %rax,%rcx\n\t"
1954 "pop %rax\n\t"
1955 "push %rcx");
1956 }
1957
1958 static void
1959 amd64_emit_stack_adjust (int n)
1960 {
1961 unsigned char buf[16];
1962 int i;
1963 CORE_ADDR buildaddr = current_insn_ptr;
1964
1965 i = 0;
1966 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1967 buf[i++] = 0x8d;
1968 buf[i++] = 0x64;
1969 buf[i++] = 0x24;
1970 /* This only handles adjustments up to 16, but we don't expect any more. */
1971 buf[i++] = n * 8;
1972 append_insns (&buildaddr, i, buf);
1973 current_insn_ptr = buildaddr;
1974 }
1975
1976 /* FN's prototype is `LONGEST(*fn)(int)'. */
1977
1978 static void
1979 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1980 {
1981 unsigned char buf[16];
1982 int i;
1983 CORE_ADDR buildaddr;
1984
1985 buildaddr = current_insn_ptr;
1986 i = 0;
1987 buf[i++] = 0xbf; /* movl $<n>,%edi */
1988 memcpy (&buf[i], &arg1, sizeof (arg1));
1989 i += 4;
1990 append_insns (&buildaddr, i, buf);
1991 current_insn_ptr = buildaddr;
1992 amd64_emit_call (fn);
1993 }
1994
1995 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1996
1997 static void
1998 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1999 {
2000 unsigned char buf[16];
2001 int i;
2002 CORE_ADDR buildaddr;
2003
2004 buildaddr = current_insn_ptr;
2005 i = 0;
2006 buf[i++] = 0xbf; /* movl $<n>,%edi */
2007 memcpy (&buf[i], &arg1, sizeof (arg1));
2008 i += 4;
2009 append_insns (&buildaddr, i, buf);
2010 current_insn_ptr = buildaddr;
2011 EMIT_ASM (amd64_void_call_2_a,
2012 /* Save away a copy of the stack top. */
2013 "push %rax\n\t"
2014 /* Also pass top as the second argument. */
2015 "mov %rax,%rsi");
2016 amd64_emit_call (fn);
2017 EMIT_ASM (amd64_void_call_2_b,
2018 /* Restore the stack top, %rax may have been trashed. */
2019 "pop %rax");
2020 }
2021
2022 void
2023 amd64_emit_eq_goto (int *offset_p, int *size_p)
2024 {
2025 EMIT_ASM (amd64_eq,
2026 "cmp %rax,(%rsp)\n\t"
2027 "jne .Lamd64_eq_fallthru\n\t"
2028 "lea 0x8(%rsp),%rsp\n\t"
2029 "pop %rax\n\t"
2030 /* jmp, but don't trust the assembler to choose the right jump */
2031 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2032 ".Lamd64_eq_fallthru:\n\t"
2033 "lea 0x8(%rsp),%rsp\n\t"
2034 "pop %rax");
2035
2036 if (offset_p)
2037 *offset_p = 13;
2038 if (size_p)
2039 *size_p = 4;
2040 }
2041
2042 void
2043 amd64_emit_ne_goto (int *offset_p, int *size_p)
2044 {
2045 EMIT_ASM (amd64_ne,
2046 "cmp %rax,(%rsp)\n\t"
2047 "je .Lamd64_ne_fallthru\n\t"
2048 "lea 0x8(%rsp),%rsp\n\t"
2049 "pop %rax\n\t"
2050 /* jmp, but don't trust the assembler to choose the right jump */
2051 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2052 ".Lamd64_ne_fallthru:\n\t"
2053 "lea 0x8(%rsp),%rsp\n\t"
2054 "pop %rax");
2055
2056 if (offset_p)
2057 *offset_p = 13;
2058 if (size_p)
2059 *size_p = 4;
2060 }
2061
2062 void
2063 amd64_emit_lt_goto (int *offset_p, int *size_p)
2064 {
2065 EMIT_ASM (amd64_lt,
2066 "cmp %rax,(%rsp)\n\t"
2067 "jnl .Lamd64_lt_fallthru\n\t"
2068 "lea 0x8(%rsp),%rsp\n\t"
2069 "pop %rax\n\t"
2070 /* jmp, but don't trust the assembler to choose the right jump */
2071 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2072 ".Lamd64_lt_fallthru:\n\t"
2073 "lea 0x8(%rsp),%rsp\n\t"
2074 "pop %rax");
2075
2076 if (offset_p)
2077 *offset_p = 13;
2078 if (size_p)
2079 *size_p = 4;
2080 }
2081
2082 void
2083 amd64_emit_le_goto (int *offset_p, int *size_p)
2084 {
2085 EMIT_ASM (amd64_le,
2086 "cmp %rax,(%rsp)\n\t"
2087 "jnle .Lamd64_le_fallthru\n\t"
2088 "lea 0x8(%rsp),%rsp\n\t"
2089 "pop %rax\n\t"
2090 /* jmp, but don't trust the assembler to choose the right jump */
2091 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2092 ".Lamd64_le_fallthru:\n\t"
2093 "lea 0x8(%rsp),%rsp\n\t"
2094 "pop %rax");
2095
2096 if (offset_p)
2097 *offset_p = 13;
2098 if (size_p)
2099 *size_p = 4;
2100 }
2101
2102 void
2103 amd64_emit_gt_goto (int *offset_p, int *size_p)
2104 {
2105 EMIT_ASM (amd64_gt,
2106 "cmp %rax,(%rsp)\n\t"
2107 "jng .Lamd64_gt_fallthru\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax\n\t"
2110 /* jmp, but don't trust the assembler to choose the right jump */
2111 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2112 ".Lamd64_gt_fallthru:\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax");
2115
2116 if (offset_p)
2117 *offset_p = 13;
2118 if (size_p)
2119 *size_p = 4;
2120 }
2121
2122 void
2123 amd64_emit_ge_goto (int *offset_p, int *size_p)
2124 {
2125 EMIT_ASM (amd64_ge,
2126 "cmp %rax,(%rsp)\n\t"
2127 "jnge .Lamd64_ge_fallthru\n\t"
2128 ".Lamd64_ge_jump:\n\t"
2129 "lea 0x8(%rsp),%rsp\n\t"
2130 "pop %rax\n\t"
2131 /* jmp, but don't trust the assembler to choose the right jump */
2132 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2133 ".Lamd64_ge_fallthru:\n\t"
2134 "lea 0x8(%rsp),%rsp\n\t"
2135 "pop %rax");
2136
2137 if (offset_p)
2138 *offset_p = 13;
2139 if (size_p)
2140 *size_p = 4;
2141 }
2142
2143 struct emit_ops amd64_emit_ops =
2144 {
2145 amd64_emit_prologue,
2146 amd64_emit_epilogue,
2147 amd64_emit_add,
2148 amd64_emit_sub,
2149 amd64_emit_mul,
2150 amd64_emit_lsh,
2151 amd64_emit_rsh_signed,
2152 amd64_emit_rsh_unsigned,
2153 amd64_emit_ext,
2154 amd64_emit_log_not,
2155 amd64_emit_bit_and,
2156 amd64_emit_bit_or,
2157 amd64_emit_bit_xor,
2158 amd64_emit_bit_not,
2159 amd64_emit_equal,
2160 amd64_emit_less_signed,
2161 amd64_emit_less_unsigned,
2162 amd64_emit_ref,
2163 amd64_emit_if_goto,
2164 amd64_emit_goto,
2165 amd64_write_goto_address,
2166 amd64_emit_const,
2167 amd64_emit_call,
2168 amd64_emit_reg,
2169 amd64_emit_pop,
2170 amd64_emit_stack_flush,
2171 amd64_emit_zero_ext,
2172 amd64_emit_swap,
2173 amd64_emit_stack_adjust,
2174 amd64_emit_int_call_1,
2175 amd64_emit_void_call_2,
2176 amd64_emit_eq_goto,
2177 amd64_emit_ne_goto,
2178 amd64_emit_lt_goto,
2179 amd64_emit_le_goto,
2180 amd64_emit_gt_goto,
2181 amd64_emit_ge_goto
2182 };
2183
2184 #endif /* __x86_64__ */
2185
2186 static void
2187 i386_emit_prologue (void)
2188 {
2189 EMIT_ASM32 (i386_prologue,
2190 "push %ebp\n\t"
2191 "mov %esp,%ebp\n\t"
2192 "push %ebx");
2193 /* At this point, the raw regs base address is at 8(%ebp), and the
2194 value pointer is at 12(%ebp). */
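/* Throughout the i386 emitters the 64-bit top-of-stack value is kept in
   %eax (low half) and %ebx (high half); deeper entries live on the machine
   stack with the low half at the lower address, hence the add/adc and
   sub/sbb pairs below.  */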
2195 }
2196
2197 static void
2198 i386_emit_epilogue (void)
2199 {
2200 EMIT_ASM32 (i386_epilogue,
2201 "mov 12(%ebp),%ecx\n\t"
2202 "mov %eax,(%ecx)\n\t"
2203 "mov %ebx,0x4(%ecx)\n\t"
2204 "xor %eax,%eax\n\t"
2205 "pop %ebx\n\t"
2206 "pop %ebp\n\t"
2207 "ret");
2208 }
2209
2210 static void
2211 i386_emit_add (void)
2212 {
2213 EMIT_ASM32 (i386_add,
2214 "add (%esp),%eax\n\t"
2215 "adc 0x4(%esp),%ebx\n\t"
2216 "lea 0x8(%esp),%esp");
2217 }
2218
2219 static void
2220 i386_emit_sub (void)
2221 {
2222 EMIT_ASM32 (i386_sub,
2223 "subl %eax,(%esp)\n\t"
2224 "sbbl %ebx,4(%esp)\n\t"
2225 "pop %eax\n\t"
2226 "pop %ebx\n\t");
2227 }
2228
2229 static void
2230 i386_emit_mul (void)
2231 {
2232 emit_error = 1;
2233 }
2234
2235 static void
2236 i386_emit_lsh (void)
2237 {
2238 emit_error = 1;
2239 }
2240
2241 static void
2242 i386_emit_rsh_signed (void)
2243 {
2244 emit_error = 1;
2245 }
2246
2247 static void
2248 i386_emit_rsh_unsigned (void)
2249 {
2250 emit_error = 1;
2251 }
2252
2253 static void
2254 i386_emit_ext (int arg)
2255 {
2256 switch (arg)
2257 {
2258 case 8:
2259 EMIT_ASM32 (i386_ext_8,
2260 "cbtw\n\t"
2261 "cwtl\n\t"
2262 "movl %eax,%ebx\n\t"
2263 "sarl $31,%ebx");
2264 break;
2265 case 16:
2266 EMIT_ASM32 (i386_ext_16,
2267 "cwtl\n\t"
2268 "movl %eax,%ebx\n\t"
2269 "sarl $31,%ebx");
2270 break;
2271 case 32:
2272 EMIT_ASM32 (i386_ext_32,
2273 "movl %eax,%ebx\n\t"
2274 "sarl $31,%ebx");
2275 break;
2276 default:
2277 emit_error = 1;
2278 }
2279 }
2280
2281 static void
2282 i386_emit_log_not (void)
2283 {
2284 EMIT_ASM32 (i386_log_not,
2285 "or %ebx,%eax\n\t"
2286 "test %eax,%eax\n\t"
2287 "sete %cl\n\t"
2288 "xor %ebx,%ebx\n\t"
2289 "movzbl %cl,%eax");
2290 }
2291
2292 static void
2293 i386_emit_bit_and (void)
2294 {
2295 EMIT_ASM32 (i386_and,
2296 "and (%esp),%eax\n\t"
2297 "and 0x4(%esp),%ebx\n\t"
2298 "lea 0x8(%esp),%esp");
2299 }
2300
2301 static void
2302 i386_emit_bit_or (void)
2303 {
2304 EMIT_ASM32 (i386_or,
2305 "or (%esp),%eax\n\t"
2306 "or 0x4(%esp),%ebx\n\t"
2307 "lea 0x8(%esp),%esp");
2308 }
2309
2310 static void
2311 i386_emit_bit_xor (void)
2312 {
2313 EMIT_ASM32 (i386_xor,
2314 "xor (%esp),%eax\n\t"
2315 "xor 0x4(%esp),%ebx\n\t"
2316 "lea 0x8(%esp),%esp");
2317 }
2318
2319 static void
2320 i386_emit_bit_not (void)
2321 {
2322 EMIT_ASM32 (i386_bit_not,
2323 "xor $0xffffffff,%eax\n\t"
2324 "xor $0xffffffff,%ebx\n\t");
2325 }
2326
2327 static void
2328 i386_emit_equal (void)
2329 {
2330 EMIT_ASM32 (i386_equal,
2331 "cmpl %ebx,4(%esp)\n\t"
2332 "jne .Li386_equal_false\n\t"
2333 "cmpl %eax,(%esp)\n\t"
2334 "je .Li386_equal_true\n\t"
2335 ".Li386_equal_false:\n\t"
2336 "xor %eax,%eax\n\t"
2337 "jmp .Li386_equal_end\n\t"
2338 ".Li386_equal_true:\n\t"
2339 "mov $1,%eax\n\t"
2340 ".Li386_equal_end:\n\t"
2341 "xor %ebx,%ebx\n\t"
2342 "lea 0x8(%esp),%esp");
2343 }
2344
2345 static void
2346 i386_emit_less_signed (void)
2347 {
2348 EMIT_ASM32 (i386_less_signed,
2349 "cmpl %ebx,4(%esp)\n\t"
2350 "jl .Li386_less_signed_true\n\t"
2351 "jne .Li386_less_signed_false\n\t"
2352 "cmpl %eax,(%esp)\n\t"
2353 "jl .Li386_less_signed_true\n\t"
2354 ".Li386_less_signed_false:\n\t"
2355 "xor %eax,%eax\n\t"
2356 "jmp .Li386_less_signed_end\n\t"
2357 ".Li386_less_signed_true:\n\t"
2358 "mov $1,%eax\n\t"
2359 ".Li386_less_signed_end:\n\t"
2360 "xor %ebx,%ebx\n\t"
2361 "lea 0x8(%esp),%esp");
2362 }
2363
2364 static void
2365 i386_emit_less_unsigned (void)
2366 {
2367 EMIT_ASM32 (i386_less_unsigned,
2368 "cmpl %ebx,4(%esp)\n\t"
2369 "jb .Li386_less_unsigned_true\n\t"
2370 "jne .Li386_less_unsigned_false\n\t"
2371 "cmpl %eax,(%esp)\n\t"
2372 "jb .Li386_less_unsigned_true\n\t"
2373 ".Li386_less_unsigned_false:\n\t"
2374 "xor %eax,%eax\n\t"
2375 "jmp .Li386_less_unsigned_end\n\t"
2376 ".Li386_less_unsigned_true:\n\t"
2377 "mov $1,%eax\n\t"
2378 ".Li386_less_unsigned_end:\n\t"
2379 "xor %ebx,%ebx\n\t"
2380 "lea 0x8(%esp),%esp");
2381 }
2382
2383 static void
2384 i386_emit_ref (int size)
2385 {
2386 switch (size)
2387 {
2388 case 1:
2389 EMIT_ASM32 (i386_ref1,
2390 "movb (%eax),%al");
2391 break;
2392 case 2:
2393 EMIT_ASM32 (i386_ref2,
2394 "movw (%eax),%ax");
2395 break;
2396 case 4:
2397 EMIT_ASM32 (i386_ref4,
2398 "movl (%eax),%eax");
2399 break;
2400 case 8:
2401 EMIT_ASM32 (i386_ref8,
2402 "movl 4(%eax),%ebx\n\t"
2403 "movl (%eax),%eax");
2404 break;
2405 }
2406 }
2407
2408 static void
2409 i386_emit_if_goto (int *offset_p, int *size_p)
2410 {
2411 EMIT_ASM32 (i386_if_goto,
2412 "mov %eax,%ecx\n\t"
2413 "or %ebx,%ecx\n\t"
2414 "pop %eax\n\t"
2415 "pop %ebx\n\t"
2416 "cmpl $0,%ecx\n\t"
2417 /* Don't trust the assembler to choose the right jump */
2418 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2419
2420 if (offset_p)
2421 *offset_p = 11; /* be sure that this matches the sequence above */
2422 if (size_p)
2423 *size_p = 4;
2424 }
2425
2426 static void
2427 i386_emit_goto (int *offset_p, int *size_p)
2428 {
2429 EMIT_ASM32 (i386_goto,
2430 /* Don't trust the assembler to choose the right jump */
2431 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2432 if (offset_p)
2433 *offset_p = 1;
2434 if (size_p)
2435 *size_p = 4;
2436 }
2437
2438 static void
2439 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2440 {
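/* FROM is the address of the 4-byte displacement field patched in by the
   *_goto emitters, so the branch target is encoded relative to the end of
   that field, i.e. FROM + SIZE, as usual for x86 relative jumps.  */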
2441 int diff = (to - (from + size));
2442 unsigned char buf[sizeof (int)];
2443
2444 /* We're only doing 4-byte sizes at the moment. */
2445 if (size != 4)
2446 {
2447 emit_error = 1;
2448 return;
2449 }
2450
2451 memcpy (buf, &diff, sizeof (int));
2452 write_inferior_memory (from, buf, sizeof (int));
2453 }
2454
2455 static void
2456 i386_emit_const (LONGEST num)
2457 {
2458 unsigned char buf[16];
2459 int i, hi, lo;
2460 CORE_ADDR buildaddr = current_insn_ptr;
2461
2462 i = 0;
2463 buf[i++] = 0xb8; /* mov $<n>,%eax */
2464 lo = num & 0xffffffff;
2465 memcpy (&buf[i], &lo, sizeof (lo));
2466 i += 4;
2467 hi = ((num >> 32) & 0xffffffff);
2468 if (hi)
2469 {
2470 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2471 memcpy (&buf[i], &hi, sizeof (hi));
2472 i += 4;
2473 }
2474 else
2475 {
2476 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2477 }
2478 append_insns (&buildaddr, i, buf);
2479 current_insn_ptr = buildaddr;
2480 }
2481
2482 static void
2483 i386_emit_call (CORE_ADDR fn)
2484 {
2485 unsigned char buf[16];
2486 int i, offset;
2487 CORE_ADDR buildaddr;
2488
2489 buildaddr = current_insn_ptr;
2490 i = 0;
2491 buf[i++] = 0xe8; /* call <reladdr> */
2492 offset = ((int) fn) - (buildaddr + 5);
2493 memcpy (buf + 1, &offset, 4);
2494 append_insns (&buildaddr, 5, buf);
2495 current_insn_ptr = buildaddr;
2496 }
2497
2498 static void
2499 i386_emit_reg (int reg)
2500 {
2501 unsigned char buf[16];
2502 int i;
2503 CORE_ADDR buildaddr;
2504
2505 EMIT_ASM32 (i386_reg_a,
2506 "sub $0x8,%esp");
2507 buildaddr = current_insn_ptr;
2508 i = 0;
2509 buf[i++] = 0xb8; /* mov $<n>,%eax */
2510 memcpy (&buf[i], &reg, sizeof (reg));
2511 i += 4;
2512 append_insns (&buildaddr, i, buf);
2513 current_insn_ptr = buildaddr;
2514 EMIT_ASM32 (i386_reg_b,
2515 "mov %eax,4(%esp)\n\t"
2516 "mov 8(%ebp),%eax\n\t"
2517 "mov %eax,(%esp)");
2518 i386_emit_call (get_raw_reg_func_addr ());
2519 EMIT_ASM32 (i386_reg_c,
2520 "xor %ebx,%ebx\n\t"
2521 "lea 0x8(%esp),%esp");
2522 }
2523
2524 static void
2525 i386_emit_pop (void)
2526 {
2527 EMIT_ASM32 (i386_pop,
2528 "pop %eax\n\t"
2529 "pop %ebx");
2530 }
2531
2532 static void
2533 i386_emit_stack_flush (void)
2534 {
2535 EMIT_ASM32 (i386_stack_flush,
2536 "push %ebx\n\t"
2537 "push %eax");
2538 }
2539
2540 static void
2541 i386_emit_zero_ext (int arg)
2542 {
2543 switch (arg)
2544 {
2545 case 8:
2546 EMIT_ASM32 (i386_zero_ext_8,
2547 "and $0xff,%eax\n\t"
2548 "xor %ebx,%ebx");
2549 break;
2550 case 16:
2551 EMIT_ASM32 (i386_zero_ext_16,
2552 "and $0xffff,%eax\n\t"
2553 "xor %ebx,%ebx");
2554 break;
2555 case 32:
2556 EMIT_ASM32 (i386_zero_ext_32,
2557 "xor %ebx,%ebx");
2558 break;
2559 default:
2560 emit_error = 1;
2561 }
2562 }
2563
2564 static void
2565 i386_emit_swap (void)
2566 {
2567 EMIT_ASM32 (i386_swap,
2568 "mov %eax,%ecx\n\t"
2569 "mov %ebx,%edx\n\t"
2570 "pop %eax\n\t"
2571 "pop %ebx\n\t"
2572 "push %edx\n\t"
2573 "push %ecx");
2574 }
2575
2576 static void
2577 i386_emit_stack_adjust (int n)
2578 {
2579 unsigned char buf[16];
2580 int i;
2581 CORE_ADDR buildaddr = current_insn_ptr;
2582
2583 i = 0;
2584 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2585 buf[i++] = 0x64;
2586 buf[i++] = 0x24;
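  /* As in the amd64 version above, this is an 8-bit displacement, so it
     only covers small adjustments, which is all we expect here.  */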
2587 buf[i++] = n * 8;
2588 append_insns (&buildaddr, i, buf);
2589 current_insn_ptr = buildaddr;
2590 }
2591
2592 /* FN's prototype is `LONGEST(*fn)(int)'. */
2593
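/* Rough shape of what this emits (a sketch): the int argument is stored at
   (%esp) and FN is called with the usual i386 cdecl convention; the LONGEST
   result comes back in %edx:%eax and is moved into the %ebx:%eax
   top-of-stack pair afterwards.  */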
2594 static void
2595 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2596 {
2597 unsigned char buf[16];
2598 int i;
2599 CORE_ADDR buildaddr;
2600
2601 EMIT_ASM32 (i386_int_call_1_a,
2602 /* Reserve a bit of stack space. */
2603 "sub $0x8,%esp");
2604 /* Put the one argument on the stack. */
2605 buildaddr = current_insn_ptr;
2606 i = 0;
2607 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2608 buf[i++] = 0x04;
2609 buf[i++] = 0x24;
2610 memcpy (&buf[i], &arg1, sizeof (arg1));
2611 i += 4;
2612 append_insns (&buildaddr, i, buf);
2613 current_insn_ptr = buildaddr;
2614 i386_emit_call (fn);
2615 EMIT_ASM32 (i386_int_call_1_c,
2616 "mov %edx,%ebx\n\t"
2617 "lea 0x8(%esp),%esp");
2618 }
2619
2620 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2621
2622 static void
2623 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2624 {
2625 unsigned char buf[16];
2626 int i;
2627 CORE_ADDR buildaddr;
2628
2629 EMIT_ASM32 (i386_void_call_2_a,
2630 /* Preserve %eax only; we don't have to worry about %ebx. */
2631 "push %eax\n\t"
2632 /* Reserve a bit of stack space for arguments. */
2633 "sub $0x10,%esp\n\t"
2634 /* Copy "top" to the second argument position. (Note that
2635 we can't assume the function won't scribble on its
2636 arguments, so don't try to restore from this.) */
2637 "mov %eax,4(%esp)\n\t"
2638 "mov %ebx,8(%esp)");
2639 /* Put the first argument on the stack. */
2640 buildaddr = current_insn_ptr;
2641 i = 0;
2642 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2643 buf[i++] = 0x04;
2644 buf[i++] = 0x24;
2645 memcpy (&buf[i], &arg1, sizeof (arg1));
2646 i += 4;
2647 append_insns (&buildaddr, i, buf);
2648 current_insn_ptr = buildaddr;
2649 i386_emit_call (fn);
2650 EMIT_ASM32 (i386_void_call_2_b,
2651 "lea 0x10(%esp),%esp\n\t"
2652 /* Restore original stack top. */
2653 "pop %eax");
2654 }
2655
2656
2657 void
2658 i386_emit_eq_goto (int *offset_p, int *size_p)
2659 {
2660 EMIT_ASM32 (eq,
2661 /* Check the low half first; it's more likely to be the decider.  */
2662 "cmpl %eax,(%esp)\n\t"
2663 "jne .Leq_fallthru\n\t"
2664 "cmpl %ebx,4(%esp)\n\t"
2665 "jne .Leq_fallthru\n\t"
2666 "lea 0x8(%esp),%esp\n\t"
2667 "pop %eax\n\t"
2668 "pop %ebx\n\t"
2669 /* jmp, but don't trust the assembler to choose the right jump */
2670 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2671 ".Leq_fallthru:\n\t"
2672 "lea 0x8(%esp),%esp\n\t"
2673 "pop %eax\n\t"
2674 "pop %ebx");
2675
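  /* With short-form conditional jumps, the jmp's 4-byte displacement starts
     18 bytes into the sequence: cmpl (3) + jne (2) + cmpl disp8 (4)
     + jne (2) + lea (4) + pop (1) + pop (1) + the 0xe9 opcode (1).  */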
2676 if (offset_p)
2677 *offset_p = 18;
2678 if (size_p)
2679 *size_p = 4;
2680 }
2681
2682 void
2683 i386_emit_ne_goto (int *offset_p, int *size_p)
2684 {
2685 EMIT_ASM32 (ne,
2686 /* Check the low half first; it's more likely to be the decider.  */
2687 "cmpl %eax,(%esp)\n\t"
2688 "jne .Lne_jump\n\t"
2689 "cmpl %ebx,4(%esp)\n\t"
2690 "je .Lne_fallthru\n\t"
2691 ".Lne_jump:\n\t"
2692 "lea 0x8(%esp),%esp\n\t"
2693 "pop %eax\n\t"
2694 "pop %ebx\n\t"
2695 /* jmp, but don't trust the assembler to choose the right jump */
2696 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2697 ".Lne_fallthru:\n\t"
2698 "lea 0x8(%esp),%esp\n\t"
2699 "pop %eax\n\t"
2700 "pop %ebx");
2701
2702 if (offset_p)
2703 *offset_p = 18;
2704 if (size_p)
2705 *size_p = 4;
2706 }
2707
2708 void
2709 i386_emit_lt_goto (int *offset_p, int *size_p)
2710 {
2711 EMIT_ASM32 (lt,
2712 "cmpl %ebx,4(%esp)\n\t"
2713 "jl .Llt_jump\n\t"
2714 "jne .Llt_fallthru\n\t"
2715 "cmpl %eax,(%esp)\n\t"
2716 "jnl .Llt_fallthru\n\t"
2717 ".Llt_jump:\n\t"
2718 "lea 0x8(%esp),%esp\n\t"
2719 "pop %eax\n\t"
2720 "pop %ebx\n\t"
2721 /* jmp, but don't trust the assembler to choose the right jump */
2722 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2723 ".Llt_fallthru:\n\t"
2724 "lea 0x8(%esp),%esp\n\t"
2725 "pop %eax\n\t"
2726 "pop %ebx");
2727
2728 if (offset_p)
2729 *offset_p = 20;
2730 if (size_p)
2731 *size_p = 4;
2732 }
2733
2734 void
2735 i386_emit_le_goto (int *offset_p, int *size_p)
2736 {
2737 EMIT_ASM32 (le,
2738 "cmpl %ebx,4(%esp)\n\t"
2739 "jle .Lle_jump\n\t"
2740 "jne .Lle_fallthru\n\t"
2741 "cmpl %eax,(%esp)\n\t"
2742 "jnle .Lle_fallthru\n\t"
2743 ".Lle_jump:\n\t"
2744 "lea 0x8(%esp),%esp\n\t"
2745 "pop %eax\n\t"
2746 "pop %ebx\n\t"
2747 /* jmp, but don't trust the assembler to choose the right jump */
2748 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2749 ".Lle_fallthru:\n\t"
2750 "lea 0x8(%esp),%esp\n\t"
2751 "pop %eax\n\t"
2752 "pop %ebx");
2753
2754 if (offset_p)
2755 *offset_p = 20;
2756 if (size_p)
2757 *size_p = 4;
2758 }
2759
2760 void
2761 i386_emit_gt_goto (int *offset_p, int *size_p)
2762 {
2763 EMIT_ASM32 (gt,
2764 "cmpl %ebx,4(%esp)\n\t"
2765 "jg .Lgt_jump\n\t"
2766 "jne .Lgt_fallthru\n\t"
2767 "cmpl %eax,(%esp)\n\t"
2768 "jng .Lgt_fallthru\n\t"
2769 ".Lgt_jump:\n\t"
2770 "lea 0x8(%esp),%esp\n\t"
2771 "pop %eax\n\t"
2772 "pop %ebx\n\t"
2773 /* jmp, but don't trust the assembler to choose the right jump */
2774 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2775 ".Lgt_fallthru:\n\t"
2776 "lea 0x8(%esp),%esp\n\t"
2777 "pop %eax\n\t"
2778 "pop %ebx");
2779
2780 if (offset_p)
2781 *offset_p = 20;
2782 if (size_p)
2783 *size_p = 4;
2784 }
2785
2786 void
2787 i386_emit_ge_goto (int *offset_p, int *size_p)
2788 {
2789 EMIT_ASM32 (ge,
2790 "cmpl %ebx,4(%esp)\n\t"
2791 "jge .Lge_jump\n\t"
2792 "jne .Lge_fallthru\n\t"
2793 "cmpl %eax,(%esp)\n\t"
2794 "jnge .Lge_fallthru\n\t"
2795 ".Lge_jump:\n\t"
2796 "lea 0x8(%esp),%esp\n\t"
2797 "pop %eax\n\t"
2798 "pop %ebx\n\t"
2799 /* jmp, but don't trust the assembler to choose the right jump */
2800 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2801 ".Lge_fallthru:\n\t"
2802 "lea 0x8(%esp),%esp\n\t"
2803 "pop %eax\n\t"
2804 "pop %ebx");
2805
2806 if (offset_p)
2807 *offset_p = 20;
2808 if (size_p)
2809 *size_p = 4;
2810 }
2811
2812 struct emit_ops i386_emit_ops =
2813 {
2814 i386_emit_prologue,
2815 i386_emit_epilogue,
2816 i386_emit_add,
2817 i386_emit_sub,
2818 i386_emit_mul,
2819 i386_emit_lsh,
2820 i386_emit_rsh_signed,
2821 i386_emit_rsh_unsigned,
2822 i386_emit_ext,
2823 i386_emit_log_not,
2824 i386_emit_bit_and,
2825 i386_emit_bit_or,
2826 i386_emit_bit_xor,
2827 i386_emit_bit_not,
2828 i386_emit_equal,
2829 i386_emit_less_signed,
2830 i386_emit_less_unsigned,
2831 i386_emit_ref,
2832 i386_emit_if_goto,
2833 i386_emit_goto,
2834 i386_write_goto_address,
2835 i386_emit_const,
2836 i386_emit_call,
2837 i386_emit_reg,
2838 i386_emit_pop,
2839 i386_emit_stack_flush,
2840 i386_emit_zero_ext,
2841 i386_emit_swap,
2842 i386_emit_stack_adjust,
2843 i386_emit_int_call_1,
2844 i386_emit_void_call_2,
2845 i386_emit_eq_goto,
2846 i386_emit_ne_goto,
2847 i386_emit_lt_goto,
2848 i386_emit_le_goto,
2849 i386_emit_gt_goto,
2850 i386_emit_ge_goto
2851 };
2852
2853
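/* Pick the emitter table that matches the inferior's word size; a 64-bit
   gdbserver can still compile agent bytecode for a 32-bit inferior via
   i386_emit_ops.  */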
2854 static struct emit_ops *
2855 x86_emit_ops (void)
2856 {
2857 #ifdef __x86_64__
2858 if (is_64bit_tdesc ())
2859 return &amd64_emit_ops;
2860 else
2861 #endif
2862 return &i386_emit_ops;
2863 }
2864
2865 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2866
2867 static const gdb_byte *
2868 x86_sw_breakpoint_from_kind (int kind, int *size)
2869 {
2870 *size = x86_breakpoint_len;
2871 return x86_breakpoint;
2872 }
2873
2874 static int
2875 x86_supports_range_stepping (void)
2876 {
2877 return 1;
2878 }
2879
2880 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2881 */
2882
2883 static int
2884 x86_supports_hardware_single_step (void)
2885 {
2886 return 1;
2887 }
2888
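/* Map the current thread's target description to the X86_TDESC_* index
   used to tell the in-process agent which register layout to assume.  */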
2889 static int
2890 x86_get_ipa_tdesc_idx (void)
2891 {
2892 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2893 const struct target_desc *tdesc = regcache->tdesc;
2894
2895 #ifdef __x86_64__
2896 if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
2897 || tdesc == tdesc_x32_linux)
2898 return X86_TDESC_SSE;
2899 if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
2900 return X86_TDESC_AVX;
2901 if (tdesc == tdesc_amd64_mpx_linux)
2902 return X86_TDESC_MPX;
2903 if (tdesc == tdesc_amd64_avx_mpx_linux)
2904 return X86_TDESC_AVX_MPX;
2905 if (tdesc == tdesc_amd64_avx_mpx_avx512_linux || tdesc == tdesc_x32_avx_avx512_linux)
2906 return X86_TDESC_AVX_MPX_AVX512;
2907 if (tdesc == tdesc_amd64_avx_avx512_linux)
2908 return X86_TDESC_AVX_AVX512;
2909 #endif
2910
2911 if (tdesc == tdesc_i386_mmx_linux)
2912 return X86_TDESC_MMX;
2913 if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
2914 return X86_TDESC_SSE;
2915 if (tdesc == tdesc_i386_avx_linux)
2916 return X86_TDESC_AVX;
2917 if (tdesc == tdesc_i386_mpx_linux)
2918 return X86_TDESC_MPX;
2919 if (tdesc == tdesc_i386_avx_mpx_linux)
2920 return X86_TDESC_AVX_MPX;
2921 if (tdesc == tdesc_i386_avx_mpx_avx512_linux)
2922 return X86_TDESC_AVX_MPX_AVX512;
2923 if (tdesc == tdesc_i386_avx_avx512_linux)
2924 return X86_TDESC_AVX_AVX512;
2925
2926 return 0;
2927 }
2928
2929 /* This is initialized assuming an amd64 target.
2930 x86_arch_setup will correct it for i386 or amd64 targets. */
2931
2932 struct linux_target_ops the_low_target =
2933 {
2934 x86_arch_setup,
2935 x86_linux_regs_info,
2936 x86_cannot_fetch_register,
2937 x86_cannot_store_register,
2938 NULL, /* fetch_register */
2939 x86_get_pc,
2940 x86_set_pc,
2941 NULL, /* breakpoint_kind_from_pc */
2942 x86_sw_breakpoint_from_kind,
2943 NULL,
2944 1,
2945 x86_breakpoint_at,
2946 x86_supports_z_point_type,
2947 x86_insert_point,
2948 x86_remove_point,
2949 x86_stopped_by_watchpoint,
2950 x86_stopped_data_address,
2951 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2952 native i386 case (no registers smaller than an xfer unit), and are not
2953 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2954 NULL,
2955 NULL,
2956 /* need to fix up i386 siginfo if host is amd64 */
2957 x86_siginfo_fixup,
2958 x86_linux_new_process,
2959 x86_linux_new_thread,
2960 x86_linux_new_fork,
2961 x86_linux_prepare_to_resume,
2962 x86_linux_process_qsupported,
2963 x86_supports_tracepoints,
2964 x86_get_thread_area,
2965 x86_install_fast_tracepoint_jump_pad,
2966 x86_emit_ops,
2967 x86_get_min_fast_tracepoint_insn_len,
2968 x86_supports_range_stepping,
2969 NULL, /* breakpoint_kind_from_current_state */
2970 x86_supports_hardware_single_step,
2971 x86_get_syscall_trapinfo,
2972 x86_get_ipa_tdesc_idx,
2973 };
2974
2975 void
2976 initialize_low_arch (void)
2977 {
2978 /* Initialize the Linux target descriptions. */
2979 #ifdef __x86_64__
2980 init_registers_amd64_linux ();
2981 init_registers_amd64_avx_linux ();
2982 init_registers_amd64_mpx_linux ();
2983 init_registers_amd64_avx_mpx_linux ();
2984 init_registers_amd64_avx_avx512_linux ();
2985 init_registers_amd64_avx_mpx_avx512_linux ();
2986
2987 init_registers_x32_linux ();
2988 init_registers_x32_avx_linux ();
2989 init_registers_x32_avx_avx512_linux ();
2990
2991 tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
2992 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
2993 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2994 #endif
2995 init_registers_i386_linux ();
2996 init_registers_i386_mmx_linux ();
2997 init_registers_i386_avx_linux ();
2998 init_registers_i386_mpx_linux ();
2999 init_registers_i386_avx_mpx_linux ();
3000 init_registers_i386_avx_avx512_linux ();
3001 init_registers_i386_avx_mpx_avx512_linux ();
3002
3003 tdesc_i386_linux_no_xml = XNEW (struct target_desc);
3004 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3005 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3006
3007 initialize_regsets_info (&x86_regsets_info);
3008 }