/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

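/* Editorial note (not in the original file): these are templates for
   a 5-byte "jmp rel32" and a 4-byte operand-size-prefixed "jmp rel16"
   instruction.  The zeroed displacement bytes are patched in when a
   fast tracepoint jump or trampoline is installed; see the
   *_install_fast_tracepoint_jump_pad routines below.  */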
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                  /* MPX registers BND0 ... BND3.  */
  -1, -1,                          /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
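
/* Editorial note (not in the original file): on 32-bit Linux, %gs
   holds a segment selector whose low 3 bits are the RPL and
   table-indicator bits, so shifting right by reg_thread_area (3)
   above yields the GDT entry index that PTRACE_GET_THREAD_AREA
   expects; desc[1] is that entry's base address.  */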

\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 (commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d) let the child inherit
     the hardware debug registers from the parent on fork/vfork/clone.
     Newer Linux kernels create such tasks with zeroed debug
     registers.

     The GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror is zeroed in the
     end, before detaching the forked-off process, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the
   same as debugging it with a 32-bit GDBSERVER, we do the 32-bit <->
   64-bit conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit but aligned to 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

   The same memory layout is used for the coredump NT_X86_XSTATE note
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the
   OS-enabled extended state mask, which is the same as the extended
   control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
   can use this mask together with the mask saved in the
   xstate_hdr_bytes to determine what states the processor/OS supports
   and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
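
/* Illustrative sketch (editorial, not part of the original file):
   given a raw XSAVE buffer of at least X86_XSTATE_SSE_SIZE bytes,
   XCR0 could be pulled out at this offset like so, which is what
   x86_linux_read_description below does via a uint64_t array:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));
     if (xcr0 & X86_XSTATE_AVX)
       ...  AVX state is available  ...
*/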

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means we don't know yet; the value is settled the first time
   the request is tried.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in its qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
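
/* For example (editorial note, not in the original file),
   push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 -- "mov %rsp,%rsi" -- into BUF and returns 3.  */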

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 2^31 bytes away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the
         trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline,
                                                   trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
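
/* Illustrative usage (editorial note, not part of the original file):
   an emitter that pops the saved top-of-stack into %rcx and adds it
   into %rax could be written as

     EMIT_ASM (my_add_sketch,
	       "pop %rcx\n\t"
	       "add %rcx,%rax");

   The inline asm places start_my_add_sketch/end_my_add_sketch labels
   around the instructions, and add_insns copies the bytes between
   those labels into the inferior at current_insn_ptr.  */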
1964
1965 #ifdef __x86_64__
1966
1967 #define EMIT_ASM32(NAME,INSNS) \
1968 do \
1969 { \
1970 extern unsigned char start_ ## NAME, end_ ## NAME; \
1971 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1972 __asm__ (".code32\n" \
1973 "\t" "jmp end_" #NAME "\n" \
1974 "\t" "start_" #NAME ":\n" \
1975 "\t" INSNS "\n" \
1976 "\t" "end_" #NAME ":\n" \
1977 ".code64\n"); \
1978 } while (0)
1979
1980 #else
1981
1982 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1983
1984 #endif
1985
1986 #ifdef __x86_64__
1987
1988 static void
1989 amd64_emit_prologue (void)
1990 {
1991 EMIT_ASM (amd64_prologue,
1992 "pushq %rbp\n\t"
1993 "movq %rsp,%rbp\n\t"
1994 "sub $0x20,%rsp\n\t"
1995 "movq %rdi,-8(%rbp)\n\t"
1996 "movq %rsi,-16(%rbp)");
1997 }
1998
1999
2000 static void
2001 amd64_emit_epilogue (void)
2002 {
2003 EMIT_ASM (amd64_epilogue,
2004 "movq -16(%rbp),%rdi\n\t"
2005 "movq %rax,(%rdi)\n\t"
2006 "xor %rax,%rax\n\t"
2007 "leave\n\t"
2008 "ret");
2009 }
2010
2011 static void
2012 amd64_emit_add (void)
2013 {
2014 EMIT_ASM (amd64_add,
2015 "add (%rsp),%rax\n\t"
2016 "lea 0x8(%rsp),%rsp");
2017 }
2018
2019 static void
2020 amd64_emit_sub (void)
2021 {
2022 EMIT_ASM (amd64_sub,
2023 "sub %rax,(%rsp)\n\t"
2024 "pop %rax");
2025 }
2026
2027 static void
2028 amd64_emit_mul (void)
2029 {
2030 emit_error = 1;
2031 }
2032
2033 static void
2034 amd64_emit_lsh (void)
2035 {
2036 emit_error = 1;
2037 }
2038
2039 static void
2040 amd64_emit_rsh_signed (void)
2041 {
2042 emit_error = 1;
2043 }
2044
2045 static void
2046 amd64_emit_rsh_unsigned (void)
2047 {
2048 emit_error = 1;
2049 }
2050
2051 static void
2052 amd64_emit_ext (int arg)
2053 {
2054 switch (arg)
2055 {
2056 case 8:
2057 EMIT_ASM (amd64_ext_8,
2058 "cbtw\n\t"
2059 "cwtl\n\t"
2060 "cltq");
2061 break;
2062 case 16:
2063 EMIT_ASM (amd64_ext_16,
2064 "cwtl\n\t"
2065 "cltq");
2066 break;
2067 case 32:
2068 EMIT_ASM (amd64_ext_32,
2069 "cltq");
2070 break;
2071 default:
2072 emit_error = 1;
2073 }
2074 }
2075
2076 static void
2077 amd64_emit_log_not (void)
2078 {
2079 EMIT_ASM (amd64_log_not,
2080 "test %rax,%rax\n\t"
2081 "sete %cl\n\t"
2082 "movzbq %cl,%rax");
2083 }
2084
2085 static void
2086 amd64_emit_bit_and (void)
2087 {
2088 EMIT_ASM (amd64_and,
2089 "and (%rsp),%rax\n\t"
2090 "lea 0x8(%rsp),%rsp");
2091 }
2092
2093 static void
2094 amd64_emit_bit_or (void)
2095 {
2096 EMIT_ASM (amd64_or,
2097 "or (%rsp),%rax\n\t"
2098 "lea 0x8(%rsp),%rsp");
2099 }
2100
2101 static void
2102 amd64_emit_bit_xor (void)
2103 {
2104 EMIT_ASM (amd64_xor,
2105 "xor (%rsp),%rax\n\t"
2106 "lea 0x8(%rsp),%rsp");
2107 }
2108
2109 static void
2110 amd64_emit_bit_not (void)
2111 {
2112 EMIT_ASM (amd64_bit_not,
2113 "xorq $0xffffffffffffffff,%rax");
2114 }
2115
2116 static void
2117 amd64_emit_equal (void)
2118 {
2119 EMIT_ASM (amd64_equal,
2120 "cmp %rax,(%rsp)\n\t"
2121 "je .Lamd64_equal_true\n\t"
2122 "xor %rax,%rax\n\t"
2123 "jmp .Lamd64_equal_end\n\t"
2124 ".Lamd64_equal_true:\n\t"
2125 "mov $0x1,%rax\n\t"
2126 ".Lamd64_equal_end:\n\t"
2127 "lea 0x8(%rsp),%rsp");
2128 }
2129
2130 static void
2131 amd64_emit_less_signed (void)
2132 {
2133 EMIT_ASM (amd64_less_signed,
2134 "cmp %rax,(%rsp)\n\t"
2135 "jl .Lamd64_less_signed_true\n\t"
2136 "xor %rax,%rax\n\t"
2137 "jmp .Lamd64_less_signed_end\n\t"
2138 ".Lamd64_less_signed_true:\n\t"
2139 "mov $1,%rax\n\t"
2140 ".Lamd64_less_signed_end:\n\t"
2141 "lea 0x8(%rsp),%rsp");
2142 }
2143
2144 static void
2145 amd64_emit_less_unsigned (void)
2146 {
2147 EMIT_ASM (amd64_less_unsigned,
2148 "cmp %rax,(%rsp)\n\t"
2149 "jb .Lamd64_less_unsigned_true\n\t"
2150 "xor %rax,%rax\n\t"
2151 "jmp .Lamd64_less_unsigned_end\n\t"
2152 ".Lamd64_less_unsigned_true:\n\t"
2153 "mov $1,%rax\n\t"
2154 ".Lamd64_less_unsigned_end:\n\t"
2155 "lea 0x8(%rsp),%rsp");
2156 }
2157
2158 static void
2159 amd64_emit_ref (int size)
2160 {
2161 switch (size)
2162 {
2163 case 1:
2164 EMIT_ASM (amd64_ref1,
2165 "movb (%rax),%al");
2166 break;
2167 case 2:
2168 EMIT_ASM (amd64_ref2,
2169 "movw (%rax),%ax");
2170 break;
2171 case 4:
2172 EMIT_ASM (amd64_ref4,
2173 "movl (%rax),%eax");
2174 break;
2175 case 8:
2176 EMIT_ASM (amd64_ref8,
2177 "movq (%rax),%rax");
2178 break;
2179 }
2180 }
2181
2182 static void
2183 amd64_emit_if_goto (int *offset_p, int *size_p)
2184 {
2185 EMIT_ASM (amd64_if_goto,
2186 "mov %rax,%rcx\n\t"
2187 "pop %rax\n\t"
2188 "cmp $0,%rcx\n\t"
2189 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2190 if (offset_p)
2191 *offset_p = 10;
2192 if (size_p)
2193 *size_p = 4;
2194 }
2195
2196 static void
2197 amd64_emit_goto (int *offset_p, int *size_p)
2198 {
2199 EMIT_ASM (amd64_goto,
2200 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2201 if (offset_p)
2202 *offset_p = 1;
2203 if (size_p)
2204 *size_p = 4;
2205 }
2206
2207 static void
2208 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2209 {
2210 int diff = (to - (from + size));
2211 unsigned char buf[sizeof (int)];
2212
2213 if (size != 4)
2214 {
2215 emit_error = 1;
2216 return;
2217 }
2218
2219 memcpy (buf, &diff, sizeof (int));
2220 write_inferior_memory (from, buf, sizeof (int));
2221 }
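
/* The displacement written above is TO minus the address just past
   the 4-byte field (FROM + SIZE), which matches x86 rel32 semantics:
   the displacement is relative to the address of the next
   instruction.  */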
2222
2223 static void
2224 amd64_emit_const (LONGEST num)
2225 {
2226 unsigned char buf[16];
2227 int i;
2228 CORE_ADDR buildaddr = current_insn_ptr;
2229
2230 i = 0;
2231 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2232 memcpy (&buf[i], &num, sizeof (num));
2233 i += 8;
2234 append_insns (&buildaddr, i, buf);
2235 current_insn_ptr = buildaddr;
2236 }
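
/* 0x48 0xb8 is REX.W + B8, i.e. movabs $imm64,%rax: the full 8-byte
   constant is embedded in the instruction itself, so no memory load
   is needed to put NUM in the bytecode top-of-stack register
   (%rax).  */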
2237
2238 static void
2239 amd64_emit_call (CORE_ADDR fn)
2240 {
2241 unsigned char buf[16];
2242 int i;
2243 CORE_ADDR buildaddr;
2244 LONGEST offset64;
2245
2246 /* The destination function, being in the shared library, may be
2247 more than 31 bits away from the compiled code pad. */
2248
2249 buildaddr = current_insn_ptr;
2250
2251 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2252
2253 i = 0;
2254
2255 if (offset64 > INT_MAX || offset64 < INT_MIN)
2256 {
2257 /* Offset is too large for a direct call. Use an indirect callq
2258 through a register instead. The bytes below encode %rdx, which is
2259 call-clobbered and carries no argument here, so it need not be saved. */
2260 buf[i++] = 0x48; /* movabs $fn,%rdx */
2261 buf[i++] = 0xba;
2262 memcpy (buf + i, &fn, 8);
2263 i += 8;
2264 buf[i++] = 0xff; /* callq *%rdx */
2265 buf[i++] = 0xd2;
2266 }
2267 else
2268 {
2269 int offset32 = offset64; /* we know we can't overflow here. */
     buf[i++] = 0xe8; /* call <reladdr> */
2270 memcpy (buf + i, &offset32, 4);
2271 i += 4;
2272 }
2273
2274 append_insns (&buildaddr, i, buf);
2275 current_insn_ptr = buildaddr;
2276 }
2277
2278 static void
2279 amd64_emit_reg (int reg)
2280 {
2281 unsigned char buf[16];
2282 int i;
2283 CORE_ADDR buildaddr;
2284
2285 /* Assume raw_regs is still in %rdi. */
2286 buildaddr = current_insn_ptr;
2287 i = 0;
2288 buf[i++] = 0xbe; /* mov $<n>,%esi */
2289 memcpy (&buf[i], &reg, sizeof (reg));
2290 i += 4;
2291 append_insns (&buildaddr, i, buf);
2292 current_insn_ptr = buildaddr;
2293 amd64_emit_call (get_raw_reg_func_addr ());
2294 }
2295
2296 static void
2297 amd64_emit_pop (void)
2298 {
2299 EMIT_ASM (amd64_pop,
2300 "pop %rax");
2301 }
2302
2303 static void
2304 amd64_emit_stack_flush (void)
2305 {
2306 EMIT_ASM (amd64_stack_flush,
2307 "push %rax");
2308 }
2309
2310 static void
2311 amd64_emit_zero_ext (int arg)
2312 {
2313 switch (arg)
2314 {
2315 case 8:
2316 EMIT_ASM (amd64_zero_ext_8,
2317 "and $0xff,%rax");
2318 break;
2319 case 16:
2320 EMIT_ASM (amd64_zero_ext_16,
2321 "and $0xffff,%rax");
2322 break;
2323 case 32:
2324 EMIT_ASM (amd64_zero_ext_32,
2325 "mov $0xffffffff,%rcx\n\t"
2326 "and %rcx,%rax");
2327 break;
2328 default:
2329 emit_error = 1;
2330 }
2331 }
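
/* The 32-bit case above cannot use an immediate: "and
   $0xffffffff,%rax" would sign-extend the 32-bit immediate to
   all-ones and become a no-op mask, so the mask is materialized in
   %rcx first.  */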
2332
2333 static void
2334 amd64_emit_swap (void)
2335 {
2336 EMIT_ASM (amd64_swap,
2337 "mov %rax,%rcx\n\t"
2338 "pop %rax\n\t"
2339 "push %rcx");
2340 }
2341
2342 static void
2343 amd64_emit_stack_adjust (int n)
2344 {
2345 unsigned char buf[16];
2346 int i;
2347 CORE_ADDR buildaddr = current_insn_ptr;
2348
2349 i = 0;
2350 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2351 buf[i++] = 0x8d;
2352 buf[i++] = 0x64;
2353 buf[i++] = 0x24;
2354 /* The one-byte displacement is signed, so this only handles adjustments up to n == 15; we don't expect any more. */
2355 buf[i++] = n * 8;
2356 append_insns (&buildaddr, i, buf);
2357 current_insn_ptr = buildaddr;
2358 }
2359
2360 /* FN's prototype is `LONGEST(*fn)(int)'. */
2361
2362 static void
2363 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2364 {
2365 unsigned char buf[16];
2366 int i;
2367 CORE_ADDR buildaddr;
2368
2369 buildaddr = current_insn_ptr;
2370 i = 0;
2371 buf[i++] = 0xbf; /* movl $<n>,%edi */
2372 memcpy (&buf[i], &arg1, sizeof (arg1));
2373 i += 4;
2374 append_insns (&buildaddr, i, buf);
2375 current_insn_ptr = buildaddr;
2376 amd64_emit_call (fn);
2377 }
2378
2379 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2380
2381 static void
2382 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2383 {
2384 unsigned char buf[16];
2385 int i;
2386 CORE_ADDR buildaddr;
2387
2388 buildaddr = current_insn_ptr;
2389 i = 0;
2390 buf[i++] = 0xbf; /* movl $<n>,%edi */
2391 memcpy (&buf[i], &arg1, sizeof (arg1));
2392 i += 4;
2393 append_insns (&buildaddr, i, buf);
2394 current_insn_ptr = buildaddr;
2395 EMIT_ASM (amd64_void_call_2_a,
2396 /* Save away a copy of the stack top. */
2397 "push %rax\n\t"
2398 /* Also pass top as the second argument. */
2399 "mov %rax,%rsi");
2400 amd64_emit_call (fn);
2401 EMIT_ASM (amd64_void_call_2_b,
2402 /* Restore the stack top, %rax may have been trashed. */
2403 "pop %rax");
2404 }
2405
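/* The six emitters below share one pattern: compare the top two
   bytecode stack entries, pop both on either path, and emit a raw
   0xe9 jmp whose rel32 is later filled in by
   amd64_write_goto_address.  The reported offset of 13 counts
   cmp (4 bytes) + jcc rel8 (2) + lea (5) + pop (1) + the 0xe9
   opcode (1).  */
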
2406 void
2407 amd64_emit_eq_goto (int *offset_p, int *size_p)
2408 {
2409 EMIT_ASM (amd64_eq,
2410 "cmp %rax,(%rsp)\n\t"
2411 "jne .Lamd64_eq_fallthru\n\t"
2412 "lea 0x8(%rsp),%rsp\n\t"
2413 "pop %rax\n\t"
2414 /* jmp, but don't trust the assembler to choose the right jump */
2415 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2416 ".Lamd64_eq_fallthru:\n\t"
2417 "lea 0x8(%rsp),%rsp\n\t"
2418 "pop %rax");
2419
2420 if (offset_p)
2421 *offset_p = 13;
2422 if (size_p)
2423 *size_p = 4;
2424 }
2425
2426 void
2427 amd64_emit_ne_goto (int *offset_p, int *size_p)
2428 {
2429 EMIT_ASM (amd64_ne,
2430 "cmp %rax,(%rsp)\n\t"
2431 "je .Lamd64_ne_fallthru\n\t"
2432 "lea 0x8(%rsp),%rsp\n\t"
2433 "pop %rax\n\t"
2434 /* jmp, but don't trust the assembler to choose the right jump */
2435 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2436 ".Lamd64_ne_fallthru:\n\t"
2437 "lea 0x8(%rsp),%rsp\n\t"
2438 "pop %rax");
2439
2440 if (offset_p)
2441 *offset_p = 13;
2442 if (size_p)
2443 *size_p = 4;
2444 }
2445
2446 void
2447 amd64_emit_lt_goto (int *offset_p, int *size_p)
2448 {
2449 EMIT_ASM (amd64_lt,
2450 "cmp %rax,(%rsp)\n\t"
2451 "jnl .Lamd64_lt_fallthru\n\t"
2452 "lea 0x8(%rsp),%rsp\n\t"
2453 "pop %rax\n\t"
2454 /* jmp, but don't trust the assembler to choose the right jump */
2455 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2456 ".Lamd64_lt_fallthru:\n\t"
2457 "lea 0x8(%rsp),%rsp\n\t"
2458 "pop %rax");
2459
2460 if (offset_p)
2461 *offset_p = 13;
2462 if (size_p)
2463 *size_p = 4;
2464 }
2465
2466 void
2467 amd64_emit_le_goto (int *offset_p, int *size_p)
2468 {
2469 EMIT_ASM (amd64_le,
2470 "cmp %rax,(%rsp)\n\t"
2471 "jnle .Lamd64_le_fallthru\n\t"
2472 "lea 0x8(%rsp),%rsp\n\t"
2473 "pop %rax\n\t"
2474 /* jmp, but don't trust the assembler to choose the right jump */
2475 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2476 ".Lamd64_le_fallthru:\n\t"
2477 "lea 0x8(%rsp),%rsp\n\t"
2478 "pop %rax");
2479
2480 if (offset_p)
2481 *offset_p = 13;
2482 if (size_p)
2483 *size_p = 4;
2484 }
2485
2486 void
2487 amd64_emit_gt_goto (int *offset_p, int *size_p)
2488 {
2489 EMIT_ASM (amd64_gt,
2490 "cmp %rax,(%rsp)\n\t"
2491 "jng .Lamd64_gt_fallthru\n\t"
2492 "lea 0x8(%rsp),%rsp\n\t"
2493 "pop %rax\n\t"
2494 /* jmp, but don't trust the assembler to choose the right jump */
2495 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2496 ".Lamd64_gt_fallthru:\n\t"
2497 "lea 0x8(%rsp),%rsp\n\t"
2498 "pop %rax");
2499
2500 if (offset_p)
2501 *offset_p = 13;
2502 if (size_p)
2503 *size_p = 4;
2504 }
2505
2506 void
2507 amd64_emit_ge_goto (int *offset_p, int *size_p)
2508 {
2509 EMIT_ASM (amd64_ge,
2510 "cmp %rax,(%rsp)\n\t"
2511 "jnge .Lamd64_ge_fallthru\n\t"
2512 ".Lamd64_ge_jump:\n\t"
2513 "lea 0x8(%rsp),%rsp\n\t"
2514 "pop %rax\n\t"
2515 /* jmp, but don't trust the assembler to choose the right jump */
2516 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2517 ".Lamd64_ge_fallthru:\n\t"
2518 "lea 0x8(%rsp),%rsp\n\t"
2519 "pop %rax");
2520
2521 if (offset_p)
2522 *offset_p = 13;
2523 if (size_p)
2524 *size_p = 4;
2525 }
2526
2527 struct emit_ops amd64_emit_ops =
2528 {
2529 amd64_emit_prologue,
2530 amd64_emit_epilogue,
2531 amd64_emit_add,
2532 amd64_emit_sub,
2533 amd64_emit_mul,
2534 amd64_emit_lsh,
2535 amd64_emit_rsh_signed,
2536 amd64_emit_rsh_unsigned,
2537 amd64_emit_ext,
2538 amd64_emit_log_not,
2539 amd64_emit_bit_and,
2540 amd64_emit_bit_or,
2541 amd64_emit_bit_xor,
2542 amd64_emit_bit_not,
2543 amd64_emit_equal,
2544 amd64_emit_less_signed,
2545 amd64_emit_less_unsigned,
2546 amd64_emit_ref,
2547 amd64_emit_if_goto,
2548 amd64_emit_goto,
2549 amd64_write_goto_address,
2550 amd64_emit_const,
2551 amd64_emit_call,
2552 amd64_emit_reg,
2553 amd64_emit_pop,
2554 amd64_emit_stack_flush,
2555 amd64_emit_zero_ext,
2556 amd64_emit_swap,
2557 amd64_emit_stack_adjust,
2558 amd64_emit_int_call_1,
2559 amd64_emit_void_call_2,
2560 amd64_emit_eq_goto,
2561 amd64_emit_ne_goto,
2562 amd64_emit_lt_goto,
2563 amd64_emit_le_goto,
2564 amd64_emit_gt_goto,
2565 amd64_emit_ge_goto
2566 };
2567
2568 #endif /* __x86_64__ */
2569
2570 static void
2571 i386_emit_prologue (void)
2572 {
2573 EMIT_ASM32 (i386_prologue,
2574 "push %ebp\n\t"
2575 "mov %esp,%ebp\n\t"
2576 "push %ebx");
2577 /* At this point, the raw regs base address is at 8(%ebp), and the
2578 value pointer is at 12(%ebp). */
2579 }
2580
2581 static void
2582 i386_emit_epilogue (void)
2583 {
2584 EMIT_ASM32 (i386_epilogue,
2585 "mov 12(%ebp),%ecx\n\t"
2586 "mov %eax,(%ecx)\n\t"
2587 "mov %ebx,0x4(%ecx)\n\t"
2588 "xor %eax,%eax\n\t"
2589 "pop %ebx\n\t"
2590 "pop %ebp\n\t"
2591 "ret");
2592 }
2593
2594 static void
2595 i386_emit_add (void)
2596 {
2597 EMIT_ASM32 (i386_add,
2598 "add (%esp),%eax\n\t"
2599 "adc 0x4(%esp),%ebx\n\t"
2600 "lea 0x8(%esp),%esp");
2601 }
2602
2603 static void
2604 i386_emit_sub (void)
2605 {
2606 EMIT_ASM32 (i386_sub,
2607 "subl %eax,(%esp)\n\t"
2608 "sbbl %ebx,4(%esp)\n\t"
2609 "pop %eax\n\t"
2610 "pop %ebx\n\t");
2611 }
2612
2613 static void
2614 i386_emit_mul (void)
2615 {
2616 emit_error = 1;
2617 }
2618
2619 static void
2620 i386_emit_lsh (void)
2621 {
2622 emit_error = 1;
2623 }
2624
2625 static void
2626 i386_emit_rsh_signed (void)
2627 {
2628 emit_error = 1;
2629 }
2630
2631 static void
2632 i386_emit_rsh_unsigned (void)
2633 {
2634 emit_error = 1;
2635 }
2636
2637 static void
2638 i386_emit_ext (int arg)
2639 {
2640 switch (arg)
2641 {
2642 case 8:
2643 EMIT_ASM32 (i386_ext_8,
2644 "cbtw\n\t"
2645 "cwtl\n\t"
2646 "movl %eax,%ebx\n\t"
2647 "sarl $31,%ebx");
2648 break;
2649 case 16:
2650 EMIT_ASM32 (i386_ext_16,
2651 "cwtl\n\t"
2652 "movl %eax,%ebx\n\t"
2653 "sarl $31,%ebx");
2654 break;
2655 case 32:
2656 EMIT_ASM32 (i386_ext_32,
2657 "movl %eax,%ebx\n\t"
2658 "sarl $31,%ebx");
2659 break;
2660 default:
2661 emit_error = 1;
2662 }
2663 }
2664
2665 static void
2666 i386_emit_log_not (void)
2667 {
2668 EMIT_ASM32 (i386_log_not,
2669 "or %ebx,%eax\n\t"
2670 "test %eax,%eax\n\t"
2671 "sete %cl\n\t"
2672 "xor %ebx,%ebx\n\t"
2673 "movzbl %cl,%eax");
2674 }
2675
2676 static void
2677 i386_emit_bit_and (void)
2678 {
2679 EMIT_ASM32 (i386_and,
2680 "and (%esp),%eax\n\t"
2681 "and 0x4(%esp),%ebx\n\t"
2682 "lea 0x8(%esp),%esp");
2683 }
2684
2685 static void
2686 i386_emit_bit_or (void)
2687 {
2688 EMIT_ASM32 (i386_or,
2689 "or (%esp),%eax\n\t"
2690 "or 0x4(%esp),%ebx\n\t"
2691 "lea 0x8(%esp),%esp");
2692 }
2693
2694 static void
2695 i386_emit_bit_xor (void)
2696 {
2697 EMIT_ASM32 (i386_xor,
2698 "xor (%esp),%eax\n\t"
2699 "xor 0x4(%esp),%ebx\n\t"
2700 "lea 0x8(%esp),%esp");
2701 }
2702
2703 static void
2704 i386_emit_bit_not (void)
2705 {
2706 EMIT_ASM32 (i386_bit_not,
2707 "xor $0xffffffff,%eax\n\t"
2708 "xor $0xffffffff,%ebx\n\t");
2709 }
2710
2711 static void
2712 i386_emit_equal (void)
2713 {
2714 EMIT_ASM32 (i386_equal,
2715 "cmpl %ebx,4(%esp)\n\t"
2716 "jne .Li386_equal_false\n\t"
2717 "cmpl %eax,(%esp)\n\t"
2718 "je .Li386_equal_true\n\t"
2719 ".Li386_equal_false:\n\t"
2720 "xor %eax,%eax\n\t"
2721 "jmp .Li386_equal_end\n\t"
2722 ".Li386_equal_true:\n\t"
2723 "mov $1,%eax\n\t"
2724 ".Li386_equal_end:\n\t"
2725 "xor %ebx,%ebx\n\t"
2726 "lea 0x8(%esp),%esp");
2727 }
2728
2729 static void
2730 i386_emit_less_signed (void)
2731 {
2732 EMIT_ASM32 (i386_less_signed,
2733 "cmpl %ebx,4(%esp)\n\t"
2734 "jl .Li386_less_signed_true\n\t"
2735 "jne .Li386_less_signed_false\n\t"
2736 "cmpl %eax,(%esp)\n\t"
2737 "jl .Li386_less_signed_true\n\t"
2738 ".Li386_less_signed_false:\n\t"
2739 "xor %eax,%eax\n\t"
2740 "jmp .Li386_less_signed_end\n\t"
2741 ".Li386_less_signed_true:\n\t"
2742 "mov $1,%eax\n\t"
2743 ".Li386_less_signed_end:\n\t"
2744 "xor %ebx,%ebx\n\t"
2745 "lea 0x8(%esp),%esp");
2746 }
2747
2748 static void
2749 i386_emit_less_unsigned (void)
2750 {
2751 EMIT_ASM32 (i386_less_unsigned,
2752 "cmpl %ebx,4(%esp)\n\t"
2753 "jb .Li386_less_unsigned_true\n\t"
2754 "jne .Li386_less_unsigned_false\n\t"
2755 "cmpl %eax,(%esp)\n\t"
2756 "jb .Li386_less_unsigned_true\n\t"
2757 ".Li386_less_unsigned_false:\n\t"
2758 "xor %eax,%eax\n\t"
2759 "jmp .Li386_less_unsigned_end\n\t"
2760 ".Li386_less_unsigned_true:\n\t"
2761 "mov $1,%eax\n\t"
2762 ".Li386_less_unsigned_end:\n\t"
2763 "xor %ebx,%ebx\n\t"
2764 "lea 0x8(%esp),%esp");
2765 }
2766
2767 static void
2768 i386_emit_ref (int size)
2769 {
2770 switch (size)
2771 {
2772 case 1:
2773 EMIT_ASM32 (i386_ref1,
2774 "movb (%eax),%al");
2775 break;
2776 case 2:
2777 EMIT_ASM32 (i386_ref2,
2778 "movw (%eax),%ax");
2779 break;
2780 case 4:
2781 EMIT_ASM32 (i386_ref4,
2782 "movl (%eax),%eax");
2783 break;
2784 case 8:
2785 EMIT_ASM32 (i386_ref8,
2786 "movl 4(%eax),%ebx\n\t"
2787 "movl (%eax),%eax");
2788 break;
2789 }
2790 }
2791
2792 static void
2793 i386_emit_if_goto (int *offset_p, int *size_p)
2794 {
2795 EMIT_ASM32 (i386_if_goto,
2796 "mov %eax,%ecx\n\t"
2797 "or %ebx,%ecx\n\t"
2798 "pop %eax\n\t"
2799 "pop %ebx\n\t"
2800 "cmpl $0,%ecx\n\t"
2801 /* Don't trust the assembler to choose the right jump */
2802 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2803
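     /* Byte accounting for the sequence above: mov (2 bytes) +
        or (2) + two pops (1 each) + cmpl $0,%ecx (3) + the 0x0f 0x85
        (jne rel32) opcode (2) puts the 4-byte displacement 11 bytes
        into the sequence.  */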
2804 if (offset_p)
2805 *offset_p = 11; /* be sure that this matches the sequence above */
2806 if (size_p)
2807 *size_p = 4;
2808 }
2809
2810 static void
2811 i386_emit_goto (int *offset_p, int *size_p)
2812 {
2813 EMIT_ASM32 (i386_goto,
2814 /* Don't trust the assembler to choose the right jump */
2815 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2816 if (offset_p)
2817 *offset_p = 1;
2818 if (size_p)
2819 *size_p = 4;
2820 }
2821
2822 static void
2823 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2824 {
2825 int diff = (to - (from + size));
2826 unsigned char buf[sizeof (int)];
2827
2828 /* We're only doing 4-byte sizes at the moment. */
2829 if (size != 4)
2830 {
2831 emit_error = 1;
2832 return;
2833 }
2834
2835 memcpy (buf, &diff, sizeof (int));
2836 write_inferior_memory (from, buf, sizeof (int));
2837 }
2838
2839 static void
2840 i386_emit_const (LONGEST num)
2841 {
2842 unsigned char buf[16];
2843 int i, hi, lo;
2844 CORE_ADDR buildaddr = current_insn_ptr;
2845
2846 i = 0;
2847 buf[i++] = 0xb8; /* mov $<n>,%eax */
2848 lo = num & 0xffffffff;
2849 memcpy (&buf[i], &lo, sizeof (lo));
2850 i += 4;
2851 hi = ((num >> 32) & 0xffffffff);
2852 if (hi)
2853 {
2854 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2855 memcpy (&buf[i], &hi, sizeof (hi));
2856 i += 4;
2857 }
2858 else
2859 {
2860 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2861 }
2862 append_insns (&buildaddr, i, buf);
2863 current_insn_ptr = buildaddr;
2864 }
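
/* On i386 the 64-bit bytecode value lives in the %eax (low half) /
   %ebx (high half) register pair, which is why the high word gets
   its own mov above, or an xor when it is zero.  */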
2865
2866 static void
2867 i386_emit_call (CORE_ADDR fn)
2868 {
2869 unsigned char buf[16];
2870 int i, offset;
2871 CORE_ADDR buildaddr;
2872
2873 buildaddr = current_insn_ptr;
2874 i = 0;
2875 buf[i++] = 0xe8; /* call <reladdr> */
2876 offset = ((int) fn) - (buildaddr + 5);
2877 memcpy (buf + 1, &offset, 4);
2878 append_insns (&buildaddr, 5, buf);
2879 current_insn_ptr = buildaddr;
2880 }
2881
2882 static void
2883 i386_emit_reg (int reg)
2884 {
2885 unsigned char buf[16];
2886 int i;
2887 CORE_ADDR buildaddr;
2888
2889 EMIT_ASM32 (i386_reg_a,
2890 "sub $0x8,%esp");
2891 buildaddr = current_insn_ptr;
2892 i = 0;
2893 buf[i++] = 0xb8; /* mov $<n>,%eax */
2894 memcpy (&buf[i], &reg, sizeof (reg));
2895 i += 4;
2896 append_insns (&buildaddr, i, buf);
2897 current_insn_ptr = buildaddr;
2898 EMIT_ASM32 (i386_reg_b,
2899 "mov %eax,4(%esp)\n\t"
2900 "mov 8(%ebp),%eax\n\t"
2901 "mov %eax,(%esp)");
2902 i386_emit_call (get_raw_reg_func_addr ());
2903 EMIT_ASM32 (i386_reg_c,
2904 "xor %ebx,%ebx\n\t"
2905 "lea 0x8(%esp),%esp");
2906 }
2907
2908 static void
2909 i386_emit_pop (void)
2910 {
2911 EMIT_ASM32 (i386_pop,
2912 "pop %eax\n\t"
2913 "pop %ebx");
2914 }
2915
2916 static void
2917 i386_emit_stack_flush (void)
2918 {
2919 EMIT_ASM32 (i386_stack_flush,
2920 "push %ebx\n\t"
2921 "push %eax");
2922 }
2923
2924 static void
2925 i386_emit_zero_ext (int arg)
2926 {
2927 switch (arg)
2928 {
2929 case 8:
2930 EMIT_ASM32 (i386_zero_ext_8,
2931 "and $0xff,%eax\n\t"
2932 "xor %ebx,%ebx");
2933 break;
2934 case 16:
2935 EMIT_ASM32 (i386_zero_ext_16,
2936 "and $0xffff,%eax\n\t"
2937 "xor %ebx,%ebx");
2938 break;
2939 case 32:
2940 EMIT_ASM32 (i386_zero_ext_32,
2941 "xor %ebx,%ebx");
2942 break;
2943 default:
2944 emit_error = 1;
2945 }
2946 }
2947
2948 static void
2949 i386_emit_swap (void)
2950 {
2951 EMIT_ASM32 (i386_swap,
2952 "mov %eax,%ecx\n\t"
2953 "mov %ebx,%edx\n\t"
2954 "pop %eax\n\t"
2955 "pop %ebx\n\t"
2956 "push %edx\n\t"
2957 "push %ecx");
2958 }
2959
2960 static void
2961 i386_emit_stack_adjust (int n)
2962 {
2963 unsigned char buf[16];
2964 int i;
2965 CORE_ADDR buildaddr = current_insn_ptr;
2966
2967 i = 0;
2968 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2969 buf[i++] = 0x64;
2970 buf[i++] = 0x24;
2971 buf[i++] = n * 8;
2972 append_insns (&buildaddr, i, buf);
2973 current_insn_ptr = buildaddr;
2974 }
2975
2976 /* FN's prototype is `LONGEST(*fn)(int)'. */
2977
2978 static void
2979 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2980 {
2981 unsigned char buf[16];
2982 int i;
2983 CORE_ADDR buildaddr;
2984
2985 EMIT_ASM32 (i386_int_call_1_a,
2986 /* Reserve a bit of stack space. */
2987 "sub $0x8,%esp");
2988 /* Put the one argument on the stack. */
2989 buildaddr = current_insn_ptr;
2990 i = 0;
2991 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2992 buf[i++] = 0x04;
2993 buf[i++] = 0x24;
2994 memcpy (&buf[i], &arg1, sizeof (arg1));
2995 i += 4;
2996 append_insns (&buildaddr, i, buf);
2997 current_insn_ptr = buildaddr;
2998 i386_emit_call (fn);
2999 EMIT_ASM32 (i386_int_call_1_c,
3000 "mov %edx,%ebx\n\t"
3001 "lea 0x8(%esp),%esp");
3002 }
3003
3004 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3005
3006 static void
3007 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3008 {
3009 unsigned char buf[16];
3010 int i;
3011 CORE_ADDR buildaddr;
3012
3013 EMIT_ASM32 (i386_void_call_2_a,
3014 /* Preserve %eax only; %ebx is callee-saved, so the called function will restore it. */
3015 "push %eax\n\t"
3016 /* Reserve a bit of stack space for arguments. */
3017 "sub $0x10,%esp\n\t"
3018 /* Copy "top" to the second argument position. (Note that
3019 we can't assume the function won't scribble on its
3020 arguments, so don't try to restore from this.) */
3021 "mov %eax,4(%esp)\n\t"
3022 "mov %ebx,8(%esp)");
3023 /* Put the first argument on the stack. */
3024 buildaddr = current_insn_ptr;
3025 i = 0;
3026 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3027 buf[i++] = 0x04;
3028 buf[i++] = 0x24;
3029 memcpy (&buf[i], &arg1, sizeof (arg1));
3030 i += 4;
3031 append_insns (&buildaddr, i, buf);
3032 current_insn_ptr = buildaddr;
3033 i386_emit_call (fn);
3034 EMIT_ASM32 (i386_void_call_2_b,
3035 "lea 0x10(%esp),%esp\n\t"
3036 /* Restore original stack top. */
3037 "pop %eax");
3038 }
3039
3040
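/* As on amd64, the emitters below compare the top two 64-bit stack
   entries (here each split across a register pair and two stack
   slots), pop both on either path, and leave a raw 0xe9 jmp for
   i386_write_goto_address to patch.  The offsets of 18 and 20 count
   the bytes preceding the rel32 field in each sequence.  */
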
3041 void
3042 i386_emit_eq_goto (int *offset_p, int *size_p)
3043 {
3044 EMIT_ASM32 (eq,
3045 /* Check low half first, more likely to be decider */
3046 "cmpl %eax,(%esp)\n\t"
3047 "jne .Leq_fallthru\n\t"
3048 "cmpl %ebx,4(%esp)\n\t"
3049 "jne .Leq_fallthru\n\t"
3050 "lea 0x8(%esp),%esp\n\t"
3051 "pop %eax\n\t"
3052 "pop %ebx\n\t"
3053 /* jmp, but don't trust the assembler to choose the right jump */
3054 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3055 ".Leq_fallthru:\n\t"
3056 "lea 0x8(%esp),%esp\n\t"
3057 "pop %eax\n\t"
3058 "pop %ebx");
3059
3060 if (offset_p)
3061 *offset_p = 18;
3062 if (size_p)
3063 *size_p = 4;
3064 }
3065
3066 void
3067 i386_emit_ne_goto (int *offset_p, int *size_p)
3068 {
3069 EMIT_ASM32 (ne,
3070 /* Check low half first, more likely to be decider */
3071 "cmpl %eax,(%esp)\n\t"
3072 "jne .Lne_jump\n\t"
3073 "cmpl %ebx,4(%esp)\n\t"
3074 "je .Lne_fallthru\n\t"
3075 ".Lne_jump:\n\t"
3076 "lea 0x8(%esp),%esp\n\t"
3077 "pop %eax\n\t"
3078 "pop %ebx\n\t"
3079 /* jmp, but don't trust the assembler to choose the right jump */
3080 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3081 ".Lne_fallthru:\n\t"
3082 "lea 0x8(%esp),%esp\n\t"
3083 "pop %eax\n\t"
3084 "pop %ebx");
3085
3086 if (offset_p)
3087 *offset_p = 18;
3088 if (size_p)
3089 *size_p = 4;
3090 }
3091
3092 void
3093 i386_emit_lt_goto (int *offset_p, int *size_p)
3094 {
3095 EMIT_ASM32 (lt,
3096 "cmpl %ebx,4(%esp)\n\t"
3097 "jl .Llt_jump\n\t"
3098 "jne .Llt_fallthru\n\t"
3099 "cmpl %eax,(%esp)\n\t"
3100 "jnl .Llt_fallthru\n\t"
3101 ".Llt_jump:\n\t"
3102 "lea 0x8(%esp),%esp\n\t"
3103 "pop %eax\n\t"
3104 "pop %ebx\n\t"
3105 /* jmp, but don't trust the assembler to choose the right jump */
3106 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3107 ".Llt_fallthru:\n\t"
3108 "lea 0x8(%esp),%esp\n\t"
3109 "pop %eax\n\t"
3110 "pop %ebx");
3111
3112 if (offset_p)
3113 *offset_p = 20;
3114 if (size_p)
3115 *size_p = 4;
3116 }
3117
3118 void
3119 i386_emit_le_goto (int *offset_p, int *size_p)
3120 {
3121 EMIT_ASM32 (le,
3122 "cmpl %ebx,4(%esp)\n\t"
3123 "jle .Lle_jump\n\t"
3124 "jne .Lle_fallthru\n\t"
3125 "cmpl %eax,(%esp)\n\t"
3126 "jnle .Lle_fallthru\n\t"
3127 ".Lle_jump:\n\t"
3128 "lea 0x8(%esp),%esp\n\t"
3129 "pop %eax\n\t"
3130 "pop %ebx\n\t"
3131 /* jmp, but don't trust the assembler to choose the right jump */
3132 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3133 ".Lle_fallthru:\n\t"
3134 "lea 0x8(%esp),%esp\n\t"
3135 "pop %eax\n\t"
3136 "pop %ebx");
3137
3138 if (offset_p)
3139 *offset_p = 20;
3140 if (size_p)
3141 *size_p = 4;
3142 }
3143
3144 void
3145 i386_emit_gt_goto (int *offset_p, int *size_p)
3146 {
3147 EMIT_ASM32 (gt,
3148 "cmpl %ebx,4(%esp)\n\t"
3149 "jg .Lgt_jump\n\t"
3150 "jne .Lgt_fallthru\n\t"
3151 "cmpl %eax,(%esp)\n\t"
3152 "jng .Lgt_fallthru\n\t"
3153 ".Lgt_jump:\n\t"
3154 "lea 0x8(%esp),%esp\n\t"
3155 "pop %eax\n\t"
3156 "pop %ebx\n\t"
3157 /* jmp, but don't trust the assembler to choose the right jump */
3158 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3159 ".Lgt_fallthru:\n\t"
3160 "lea 0x8(%esp),%esp\n\t"
3161 "pop %eax\n\t"
3162 "pop %ebx");
3163
3164 if (offset_p)
3165 *offset_p = 20;
3166 if (size_p)
3167 *size_p = 4;
3168 }
3169
3170 void
3171 i386_emit_ge_goto (int *offset_p, int *size_p)
3172 {
3173 EMIT_ASM32 (ge,
3174 "cmpl %ebx,4(%esp)\n\t"
3175 "jge .Lge_jump\n\t"
3176 "jne .Lge_fallthru\n\t"
3177 "cmpl %eax,(%esp)\n\t"
3178 "jnge .Lge_fallthru\n\t"
3179 ".Lge_jump:\n\t"
3180 "lea 0x8(%esp),%esp\n\t"
3181 "pop %eax\n\t"
3182 "pop %ebx\n\t"
3183 /* jmp, but don't trust the assembler to choose the right jump */
3184 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3185 ".Lge_fallthru:\n\t"
3186 "lea 0x8(%esp),%esp\n\t"
3187 "pop %eax\n\t"
3188 "pop %ebx");
3189
3190 if (offset_p)
3191 *offset_p = 20;
3192 if (size_p)
3193 *size_p = 4;
3194 }
3195
3196 struct emit_ops i386_emit_ops =
3197 {
3198 i386_emit_prologue,
3199 i386_emit_epilogue,
3200 i386_emit_add,
3201 i386_emit_sub,
3202 i386_emit_mul,
3203 i386_emit_lsh,
3204 i386_emit_rsh_signed,
3205 i386_emit_rsh_unsigned,
3206 i386_emit_ext,
3207 i386_emit_log_not,
3208 i386_emit_bit_and,
3209 i386_emit_bit_or,
3210 i386_emit_bit_xor,
3211 i386_emit_bit_not,
3212 i386_emit_equal,
3213 i386_emit_less_signed,
3214 i386_emit_less_unsigned,
3215 i386_emit_ref,
3216 i386_emit_if_goto,
3217 i386_emit_goto,
3218 i386_write_goto_address,
3219 i386_emit_const,
3220 i386_emit_call,
3221 i386_emit_reg,
3222 i386_emit_pop,
3223 i386_emit_stack_flush,
3224 i386_emit_zero_ext,
3225 i386_emit_swap,
3226 i386_emit_stack_adjust,
3227 i386_emit_int_call_1,
3228 i386_emit_void_call_2,
3229 i386_emit_eq_goto,
3230 i386_emit_ne_goto,
3231 i386_emit_lt_goto,
3232 i386_emit_le_goto,
3233 i386_emit_gt_goto,
3234 i386_emit_ge_goto
3235 };
3236
3237
3238 static struct emit_ops *
3239 x86_emit_ops (void)
3240 {
3241 #ifdef __x86_64__
3242 if (is_64bit_tdesc ())
3243 return &amd64_emit_ops;
3244 else
3245 #endif
3246 return &i386_emit_ops;
3247 }
3248
3249 static int
3250 x86_supports_range_stepping (void)
3251 {
3252 return 1;
3253 }
3254
3255 /* This is initialized assuming an amd64 target.
3256 x86_arch_setup will correct it for i386 or amd64 targets. */
3257
3258 struct linux_target_ops the_low_target =
3259 {
3260 x86_arch_setup,
3261 x86_linux_regs_info,
3262 x86_cannot_fetch_register,
3263 x86_cannot_store_register,
3264 NULL, /* fetch_register */
3265 x86_get_pc,
3266 x86_set_pc,
3267 x86_breakpoint,
3268 x86_breakpoint_len,
3269 NULL, /* breakpoint_reinsert_addr */
3270 1, /* decr_pc_after_break */
3271 x86_breakpoint_at,
3272 x86_supports_z_point_type,
3273 x86_insert_point,
3274 x86_remove_point,
3275 x86_stopped_by_watchpoint,
3276 x86_stopped_data_address,
3277 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3278 native i386 case (no registers smaller than an xfer unit), and are not
3279 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3280 NULL,
3281 NULL,
3282 /* need to fix up i386 siginfo if host is amd64 */
3283 x86_siginfo_fixup,
3284 x86_linux_new_process,
3285 x86_linux_new_thread,
3286 x86_linux_new_fork,
3287 x86_linux_prepare_to_resume,
3288 x86_linux_process_qsupported,
3289 x86_supports_tracepoints,
3290 x86_get_thread_area,
3291 x86_install_fast_tracepoint_jump_pad,
3292 x86_emit_ops,
3293 x86_get_min_fast_tracepoint_insn_len,
3294 x86_supports_range_stepping,
3295 };
3296
3297 void
3298 initialize_low_arch (void)
3299 {
3300 /* Initialize the Linux target descriptions. */
3301 #ifdef __x86_64__
3302 init_registers_amd64_linux ();
3303 init_registers_amd64_avx_linux ();
3304 init_registers_amd64_avx512_linux ();
3305 init_registers_amd64_mpx_linux ();
3306
3307 init_registers_x32_linux ();
3308 init_registers_x32_avx_linux ();
3309 init_registers_x32_avx512_linux ();
3310
3311 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3312 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3313 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3314 #endif
3315 init_registers_i386_linux ();
3316 init_registers_i386_mmx_linux ();
3317 init_registers_i386_avx_linux ();
3318 init_registers_i386_avx512_linux ();
3319 init_registers_i386_mpx_linux ();
3320
3321 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3322 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3323 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3324
3325 initialize_regsets_info (&x86_regsets_info);
3326 }