/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
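
/* Editor's note (illustrative, not from the original source): jump_insn
   is a template for a 5-byte 32-bit relative jump (0xe9 rel32), and
   small_jump_insn for a 4-byte 16-bit relative jump (0x66 0xe9 rel16).
   For example, a jump written at TPADDR that should land at TARGET gets
   its displacement patched in as

     offset = TARGET - (TPADDR + sizeof (jump_insn));
     memcpy (jump_insn + 1, &offset, 4);

   i.e. the displacement is relative to the end of the jump instruction,
   exactly as the jump-pad builders below compute it.  */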

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                    /* MPX registers BND0 ... BND3.  */
  -1, -1,                            /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,    /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,    /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,    /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,    /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
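
/* Editor's note (illustrative, not from the original source): callers
   only ever compare the value produced here for equality, e.g.

     CORE_ADDR area;
     if (x86_get_thread_area (lwpid, &area) == 0)
       ...use AREA as an opaque per-thread key...

   matching the "treat it as opaque" contract described above.  */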


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

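/* Editor's note (illustrative, not from the original source): the
   layouts differ because 32-bit userspace has 4-byte pointers and a
   4-byte clock_t, so fields such as si_ptr and the _sigchld times land
   at different offsets than in the native 64-bit siginfo_t.  That is
   why the conversion helpers below copy field by field rather than
   memcpy'ing the whole object.  */
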
#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid or si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
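
/* Editor's sketch (illustrative, not from the original source): a
   caller that has just fetched a native siginfo with PTRACE_GETSIGINFO
   and wants it in a 32-bit inferior's layout would use direction 0:

     siginfo_t native;
     char inf[sizeof (siginfo_t)];
     ...
     if (x86_siginfo_fixup (&native, inf, 0))
       ...INF now holds the inferior-layout copy...

   Direction 1 performs the reverse conversion, from INF back into
   NATIVE.  */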
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) are the
  OS-enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
  can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
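
/* Editor's example (illustrative, not from the original source): given
   a raw XSAVE buffer as read via PTRACE_GETREGSET with NT_X86_XSTATE,
   XCR0 can be extracted as

     uint64_t xcr0;
     memcpy (&xcr0, (char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));

   which is what x86_linux_read_description below does when it indexes
   xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)].  */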

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means we don't know yet; it is determined on first use.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means we don't
   know yet; it is determined on first use.  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume GDB doesn't support XML unless it tells us otherwise.  If
     GDB sends "xmlRegisters=" with "i386" in its qSupported query, it
     supports x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
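
/* Editor's note (illustrative): a GDB that understands x86 XML target
   descriptions sends e.g. "xmlRegisters=i386" in qSupported.  The
   value may be a comma-separated list of architectures (hence the
   strtok loop above), and any entry equal to "i386" turns use_xml
   on.  */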

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

/* Write LEN bytes from BUF to the inferior's memory at *TO, advancing
   *TO past the bytes just written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

/* Parse OP, a string of whitespace-separated hexadecimal byte values,
   into BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
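
/* For example (editor's illustration):

     unsigned char buf[4];
     int n = push_opcode (buf, "48 89 e6");

   leaves n == 3 and buf holding { 0x48, 0x89, 0xe6 }, the encoding of
   "mov %rsp,%rsi" that the jump-pad builders below emit this way.  */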

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that will jump to the jump pad
   into JJUMP_PAD_INSN.  The caller is responsible for writing it in
   at the tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that will jump to the jump pad
   into JJUMP_PAD_INSN.  The caller is responsible for writing it in
   at the tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which leaves the original
     i386 behind.  If we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

/* Append LEN bytes from START to the code being built at
   current_insn_ptr, advancing it past the new bytes.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
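
/* For example (editor's illustration),

     EMIT_ASM (my_nop, "nop")

   assembles the "nop" byte between the start_my_nop and end_my_nop
   labels at GDBserver build time, and at runtime copies that byte into
   the inferior at current_insn_ptr via add_insns.  */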

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2161
2162 static void
2163 amd64_emit_if_goto (int *offset_p, int *size_p)
2164 {
2165 EMIT_ASM (amd64_if_goto,
2166 "mov %rax,%rcx\n\t"
2167 "pop %rax\n\t"
2168 "cmp $0,%rcx\n\t"
2169 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2170 if (offset_p)
2171 *offset_p = 10;
2172 if (size_p)
2173 *size_p = 4;
2174 }
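
/* The offset/size convention used by this and the other branch
   emitters: *OFFSET_P is the distance in bytes from the start of the
   emitted sequence to the branch's displacement field, and *SIZE_P is
   that field's width, so the caller can patch the target in later via
   write_goto_address.  Here 10 = 3 (mov %rax,%rcx) + 1 (pop %rax)
   + 4 (cmp $0,%rcx) + 2 (the 0x0f 0x85 jne opcode).  */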
2175
2176 static void
2177 amd64_emit_goto (int *offset_p, int *size_p)
2178 {
2179 EMIT_ASM (amd64_goto,
2180 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2181 if (offset_p)
2182 *offset_p = 1;
2183 if (size_p)
2184 *size_p = 4;
2185 }
2186
2187 static void
2188 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2189 {
2190 int diff = (to - (from + size));
2191 unsigned char buf[sizeof (int)];
2192
2193 if (size != 4)
2194 {
2195 emit_error = 1;
2196 return;
2197 }
2198
2199 memcpy (buf, &diff, sizeof (int));
2200 write_inferior_memory (from, buf, sizeof (int));
2201 }
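
/* The displacement is relative to the end of the branch instruction:
   FROM points at the displacement field itself, just past the opcode.
   For example, patching a 4-byte field at FROM = 0x1000 to jump to
   TO = 0x1020 writes DIFF = 0x1020 - (0x1000 + 4) = 0x1c.  */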
2202
2203 static void
2204 amd64_emit_const (LONGEST num)
2205 {
2206 unsigned char buf[16];
2207 int i;
2208 CORE_ADDR buildaddr = current_insn_ptr;
2209
2210 i = 0;
2211 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2212 memcpy (&buf[i], &num, sizeof (num));
2213 i += 8;
2214 append_insns (&buildaddr, i, buf);
2215 current_insn_ptr = buildaddr;
2216 }
2217
2218 static void
2219 amd64_emit_call (CORE_ADDR fn)
2220 {
2221 unsigned char buf[16];
2222 int i;
2223 CORE_ADDR buildaddr;
2224 LONGEST offset64;
2225
2226 /* The destination function, being in a shared library, may be
2227 more than 31 bits away from the compiled code pad. */
2228
2229 buildaddr = current_insn_ptr;
2230
2231 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2232
2233 i = 0;
2234
2235 if (offset64 > INT_MAX || offset64 < INT_MIN)
2236 {
2237 /* Offset is too large for a direct call. Load the address into
2238 a register and call through it; the bytes below use %rdx, which
2239 is call-clobbered and carries no argument here. */
2240 buf[i++] = 0x48; /* movabs $fn,%rdx */
2241 buf[i++] = 0xba;
2242 memcpy (buf + i, &fn, 8);
2243 i += 8;
2244 buf[i++] = 0xff; /* callq *%rdx */
2245 buf[i++] = 0xd2;
2246 }
2247 else
2248 {
2249 int offset32 = offset64; /* we know it can't overflow here. */
buf[i++] = 0xe8; /* call <reladdr> */
2250 memcpy (buf + i, &offset32, 4);
2251 i += 4;
2252 }
2253
2254 append_insns (&buildaddr, i, buf);
2255 current_insn_ptr = buildaddr;
2256 }
2257
2258 static void
2259 amd64_emit_reg (int reg)
2260 {
2261 unsigned char buf[16];
2262 int i;
2263 CORE_ADDR buildaddr;
2264
2265 /* Assume raw_regs is still in %rdi. */
2266 buildaddr = current_insn_ptr;
2267 i = 0;
2268 buf[i++] = 0xbe; /* mov $<n>,%esi */
2269 memcpy (&buf[i], &reg, sizeof (reg));
2270 i += 4;
2271 append_insns (&buildaddr, i, buf);
2272 current_insn_ptr = buildaddr;
2273 amd64_emit_call (get_raw_reg_func_addr ());
2274 }
2275
2276 static void
2277 amd64_emit_pop (void)
2278 {
2279 EMIT_ASM (amd64_pop,
2280 "pop %rax");
2281 }
2282
2283 static void
2284 amd64_emit_stack_flush (void)
2285 {
2286 EMIT_ASM (amd64_stack_flush,
2287 "push %rax");
2288 }
2289
2290 static void
2291 amd64_emit_zero_ext (int arg)
2292 {
2293 switch (arg)
2294 {
2295 case 8:
2296 EMIT_ASM (amd64_zero_ext_8,
2297 "and $0xff,%rax");
2298 break;
2299 case 16:
2300 EMIT_ASM (amd64_zero_ext_16,
2301 "and $0xffff,%rax");
2302 break;
2303 case 32:
2304 EMIT_ASM (amd64_zero_ext_32,
2305 "mov $0xffffffff,%rcx\n\t"
2306 "and %rcx,%rax");
2307 break;
2308 default:
2309 emit_error = 1;
2310 }
2311 }
2312
2313 static void
2314 amd64_emit_swap (void)
2315 {
2316 EMIT_ASM (amd64_swap,
2317 "mov %rax,%rcx\n\t"
2318 "pop %rax\n\t"
2319 "push %rcx");
2320 }
2321
2322 static void
2323 amd64_emit_stack_adjust (int n)
2324 {
2325 unsigned char buf[16];
2326 int i;
2327 CORE_ADDR buildaddr = current_insn_ptr;
2328
2329 i = 0;
2330 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2331 buf[i++] = 0x8d;
2332 buf[i++] = 0x64;
2333 buf[i++] = 0x24;
2334 /* The one-byte displacement only handles adjustments of up to 15 words, but we don't expect any more. */
2335 buf[i++] = n * 8;
2336 append_insns (&buildaddr, i, buf);
2337 current_insn_ptr = buildaddr;
2338 }
2339
2340 /* FN's prototype is `LONGEST(*fn)(int)'. */
2341
2342 static void
2343 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2344 {
2345 unsigned char buf[16];
2346 int i;
2347 CORE_ADDR buildaddr;
2348
2349 buildaddr = current_insn_ptr;
2350 i = 0;
2351 buf[i++] = 0xbf; /* movl $<n>,%edi */
2352 memcpy (&buf[i], &arg1, sizeof (arg1));
2353 i += 4;
2354 append_insns (&buildaddr, i, buf);
2355 current_insn_ptr = buildaddr;
2356 amd64_emit_call (fn);
2357 }
2358
2359 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2360
2361 static void
2362 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2363 {
2364 unsigned char buf[16];
2365 int i;
2366 CORE_ADDR buildaddr;
2367
2368 buildaddr = current_insn_ptr;
2369 i = 0;
2370 buf[i++] = 0xbf; /* movl $<n>,%edi */
2371 memcpy (&buf[i], &arg1, sizeof (arg1));
2372 i += 4;
2373 append_insns (&buildaddr, i, buf);
2374 current_insn_ptr = buildaddr;
2375 EMIT_ASM (amd64_void_call_2_a,
2376 /* Save away a copy of the stack top. */
2377 "push %rax\n\t"
2378 /* Also pass top as the second argument. */
2379 "mov %rax,%rsi");
2380 amd64_emit_call (fn);
2381 EMIT_ASM (amd64_void_call_2_b,
2382 /* Restore the stack top, %rax may have been trashed. */
2383 "pop %rax");
2384 }
2385
2386 void
2387 amd64_emit_eq_goto (int *offset_p, int *size_p)
2388 {
2389 EMIT_ASM (amd64_eq,
2390 "cmp %rax,(%rsp)\n\t"
2391 "jne .Lamd64_eq_fallthru\n\t"
2392 "lea 0x8(%rsp),%rsp\n\t"
2393 "pop %rax\n\t"
2394 /* jmp, but don't trust the assembler to choose the right jump */
2395 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2396 ".Lamd64_eq_fallthru:\n\t"
2397 "lea 0x8(%rsp),%rsp\n\t"
2398 "pop %rax");
2399
2400 if (offset_p)
2401 *offset_p = 13;
2402 if (size_p)
2403 *size_p = 4;
2404 }
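
/* In amd64_emit_eq_goto above and the five conditional-goto variants
   that follow, the cmp (4 bytes), jcc rel8 (2), lea (5) and pop (1)
   preceding the 0xe9 opcode total 12 bytes, so the 4-byte displacement
   to be patched sits at offset 13, immediately after the opcode.  */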
2405
2406 void
2407 amd64_emit_ne_goto (int *offset_p, int *size_p)
2408 {
2409 EMIT_ASM (amd64_ne,
2410 "cmp %rax,(%rsp)\n\t"
2411 "je .Lamd64_ne_fallthru\n\t"
2412 "lea 0x8(%rsp),%rsp\n\t"
2413 "pop %rax\n\t"
2414 /* jmp, but don't trust the assembler to choose the right jump */
2415 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2416 ".Lamd64_ne_fallthru:\n\t"
2417 "lea 0x8(%rsp),%rsp\n\t"
2418 "pop %rax");
2419
2420 if (offset_p)
2421 *offset_p = 13;
2422 if (size_p)
2423 *size_p = 4;
2424 }
2425
2426 void
2427 amd64_emit_lt_goto (int *offset_p, int *size_p)
2428 {
2429 EMIT_ASM (amd64_lt,
2430 "cmp %rax,(%rsp)\n\t"
2431 "jnl .Lamd64_lt_fallthru\n\t"
2432 "lea 0x8(%rsp),%rsp\n\t"
2433 "pop %rax\n\t"
2434 /* jmp, but don't trust the assembler to choose the right jump */
2435 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2436 ".Lamd64_lt_fallthru:\n\t"
2437 "lea 0x8(%rsp),%rsp\n\t"
2438 "pop %rax");
2439
2440 if (offset_p)
2441 *offset_p = 13;
2442 if (size_p)
2443 *size_p = 4;
2444 }
2445
2446 void
2447 amd64_emit_le_goto (int *offset_p, int *size_p)
2448 {
2449 EMIT_ASM (amd64_le,
2450 "cmp %rax,(%rsp)\n\t"
2451 "jnle .Lamd64_le_fallthru\n\t"
2452 "lea 0x8(%rsp),%rsp\n\t"
2453 "pop %rax\n\t"
2454 /* jmp, but don't trust the assembler to choose the right jump */
2455 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2456 ".Lamd64_le_fallthru:\n\t"
2457 "lea 0x8(%rsp),%rsp\n\t"
2458 "pop %rax");
2459
2460 if (offset_p)
2461 *offset_p = 13;
2462 if (size_p)
2463 *size_p = 4;
2464 }
2465
2466 void
2467 amd64_emit_gt_goto (int *offset_p, int *size_p)
2468 {
2469 EMIT_ASM (amd64_gt,
2470 "cmp %rax,(%rsp)\n\t"
2471 "jng .Lamd64_gt_fallthru\n\t"
2472 "lea 0x8(%rsp),%rsp\n\t"
2473 "pop %rax\n\t"
2474 /* jmp, but don't trust the assembler to choose the right jump */
2475 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2476 ".Lamd64_gt_fallthru:\n\t"
2477 "lea 0x8(%rsp),%rsp\n\t"
2478 "pop %rax");
2479
2480 if (offset_p)
2481 *offset_p = 13;
2482 if (size_p)
2483 *size_p = 4;
2484 }
2485
2486 void
2487 amd64_emit_ge_goto (int *offset_p, int *size_p)
2488 {
2489 EMIT_ASM (amd64_ge,
2490 "cmp %rax,(%rsp)\n\t"
2491 "jnge .Lamd64_ge_fallthru\n\t"
2492 ".Lamd64_ge_jump:\n\t"
2493 "lea 0x8(%rsp),%rsp\n\t"
2494 "pop %rax\n\t"
2495 /* jmp, but don't trust the assembler to choose the right jump */
2496 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2497 ".Lamd64_ge_fallthru:\n\t"
2498 "lea 0x8(%rsp),%rsp\n\t"
2499 "pop %rax");
2500
2501 if (offset_p)
2502 *offset_p = 13;
2503 if (size_p)
2504 *size_p = 4;
2505 }
2506
2507 struct emit_ops amd64_emit_ops =
2508 {
2509 amd64_emit_prologue,
2510 amd64_emit_epilogue,
2511 amd64_emit_add,
2512 amd64_emit_sub,
2513 amd64_emit_mul,
2514 amd64_emit_lsh,
2515 amd64_emit_rsh_signed,
2516 amd64_emit_rsh_unsigned,
2517 amd64_emit_ext,
2518 amd64_emit_log_not,
2519 amd64_emit_bit_and,
2520 amd64_emit_bit_or,
2521 amd64_emit_bit_xor,
2522 amd64_emit_bit_not,
2523 amd64_emit_equal,
2524 amd64_emit_less_signed,
2525 amd64_emit_less_unsigned,
2526 amd64_emit_ref,
2527 amd64_emit_if_goto,
2528 amd64_emit_goto,
2529 amd64_write_goto_address,
2530 amd64_emit_const,
2531 amd64_emit_call,
2532 amd64_emit_reg,
2533 amd64_emit_pop,
2534 amd64_emit_stack_flush,
2535 amd64_emit_zero_ext,
2536 amd64_emit_swap,
2537 amd64_emit_stack_adjust,
2538 amd64_emit_int_call_1,
2539 amd64_emit_void_call_2,
2540 amd64_emit_eq_goto,
2541 amd64_emit_ne_goto,
2542 amd64_emit_lt_goto,
2543 amd64_emit_le_goto,
2544 amd64_emit_gt_goto,
2545 amd64_emit_ge_goto
2546 };
2547
2548 #endif /* __x86_64__ */
2549
2550 static void
2551 i386_emit_prologue (void)
2552 {
2553 EMIT_ASM32 (i386_prologue,
2554 "push %ebp\n\t"
2555 "mov %esp,%ebp\n\t"
2556 "push %ebx");
2557 /* At this point, the raw regs base address is at 8(%ebp), and the
2558 value pointer is at 12(%ebp). */
2559 }
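
/* In the 32-bit code, a 64-bit agent-expression value is kept as a
   register pair: %eax holds the low half and %ebx the high half, and
   deeper evaluation-stack entries occupy two 4-byte slots each.  That
   is why the operations below come in low/high pairs (add/adc,
   sub/sbb, and so on) and why dropping one entry adjusts %esp by 8.  */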
2560
2561 static void
2562 i386_emit_epilogue (void)
2563 {
2564 EMIT_ASM32 (i386_epilogue,
2565 "mov 12(%ebp),%ecx\n\t"
2566 "mov %eax,(%ecx)\n\t"
2567 "mov %ebx,0x4(%ecx)\n\t"
2568 "xor %eax,%eax\n\t"
2569 "pop %ebx\n\t"
2570 "pop %ebp\n\t"
2571 "ret");
2572 }
2573
2574 static void
2575 i386_emit_add (void)
2576 {
2577 EMIT_ASM32 (i386_add,
2578 "add (%esp),%eax\n\t"
2579 "adc 0x4(%esp),%ebx\n\t"
2580 "lea 0x8(%esp),%esp");
2581 }
2582
2583 static void
2584 i386_emit_sub (void)
2585 {
2586 EMIT_ASM32 (i386_sub,
2587 "subl %eax,(%esp)\n\t"
2588 "sbbl %ebx,4(%esp)\n\t"
2589 "pop %eax\n\t"
2590 "pop %ebx\n\t");
2591 }
2592
2593 static void
2594 i386_emit_mul (void)
2595 {
2596 emit_error = 1;
2597 }
2598
2599 static void
2600 i386_emit_lsh (void)
2601 {
2602 emit_error = 1;
2603 }
2604
2605 static void
2606 i386_emit_rsh_signed (void)
2607 {
2608 emit_error = 1;
2609 }
2610
2611 static void
2612 i386_emit_rsh_unsigned (void)
2613 {
2614 emit_error = 1;
2615 }
2616
2617 static void
2618 i386_emit_ext (int arg)
2619 {
2620 switch (arg)
2621 {
2622 case 8:
2623 EMIT_ASM32 (i386_ext_8,
2624 "cbtw\n\t"
2625 "cwtl\n\t"
2626 "movl %eax,%ebx\n\t"
2627 "sarl $31,%ebx");
2628 break;
2629 case 16:
2630 EMIT_ASM32 (i386_ext_16,
2631 "cwtl\n\t"
2632 "movl %eax,%ebx\n\t"
2633 "sarl $31,%ebx");
2634 break;
2635 case 32:
2636 EMIT_ASM32 (i386_ext_32,
2637 "movl %eax,%ebx\n\t"
2638 "sarl $31,%ebx");
2639 break;
2640 default:
2641 emit_error = 1;
2642 }
2643 }
2644
2645 static void
2646 i386_emit_log_not (void)
2647 {
2648 EMIT_ASM32 (i386_log_not,
2649 "or %ebx,%eax\n\t"
2650 "test %eax,%eax\n\t"
2651 "sete %cl\n\t"
2652 "xor %ebx,%ebx\n\t"
2653 "movzbl %cl,%eax");
2654 }
2655
2656 static void
2657 i386_emit_bit_and (void)
2658 {
2659 EMIT_ASM32 (i386_and,
2660 "and (%esp),%eax\n\t"
2661 "and 0x4(%esp),%ebx\n\t"
2662 "lea 0x8(%esp),%esp");
2663 }
2664
2665 static void
2666 i386_emit_bit_or (void)
2667 {
2668 EMIT_ASM32 (i386_or,
2669 "or (%esp),%eax\n\t"
2670 "or 0x4(%esp),%ebx\n\t"
2671 "lea 0x8(%esp),%esp");
2672 }
2673
2674 static void
2675 i386_emit_bit_xor (void)
2676 {
2677 EMIT_ASM32 (i386_xor,
2678 "xor (%esp),%eax\n\t"
2679 "xor 0x4(%esp),%ebx\n\t"
2680 "lea 0x8(%esp),%esp");
2681 }
2682
2683 static void
2684 i386_emit_bit_not (void)
2685 {
2686 EMIT_ASM32 (i386_bit_not,
2687 "xor $0xffffffff,%eax\n\t"
2688 "xor $0xffffffff,%ebx\n\t");
2689 }
2690
2691 static void
2692 i386_emit_equal (void)
2693 {
2694 EMIT_ASM32 (i386_equal,
2695 "cmpl %ebx,4(%esp)\n\t"
2696 "jne .Li386_equal_false\n\t"
2697 "cmpl %eax,(%esp)\n\t"
2698 "je .Li386_equal_true\n\t"
2699 ".Li386_equal_false:\n\t"
2700 "xor %eax,%eax\n\t"
2701 "jmp .Li386_equal_end\n\t"
2702 ".Li386_equal_true:\n\t"
2703 "mov $1,%eax\n\t"
2704 ".Li386_equal_end:\n\t"
2705 "xor %ebx,%ebx\n\t"
2706 "lea 0x8(%esp),%esp");
2707 }
2708
2709 static void
2710 i386_emit_less_signed (void)
2711 {
2712 EMIT_ASM32 (i386_less_signed,
2713 "cmpl %ebx,4(%esp)\n\t"
2714 "jl .Li386_less_signed_true\n\t"
2715 "jne .Li386_less_signed_false\n\t"
2716 "cmpl %eax,(%esp)\n\t"
2717 "jl .Li386_less_signed_true\n\t"
2718 ".Li386_less_signed_false:\n\t"
2719 "xor %eax,%eax\n\t"
2720 "jmp .Li386_less_signed_end\n\t"
2721 ".Li386_less_signed_true:\n\t"
2722 "mov $1,%eax\n\t"
2723 ".Li386_less_signed_end:\n\t"
2724 "xor %ebx,%ebx\n\t"
2725 "lea 0x8(%esp),%esp");
2726 }
2727
2728 static void
2729 i386_emit_less_unsigned (void)
2730 {
2731 EMIT_ASM32 (i386_less_unsigned,
2732 "cmpl %ebx,4(%esp)\n\t"
2733 "jb .Li386_less_unsigned_true\n\t"
2734 "jne .Li386_less_unsigned_false\n\t"
2735 "cmpl %eax,(%esp)\n\t"
2736 "jb .Li386_less_unsigned_true\n\t"
2737 ".Li386_less_unsigned_false:\n\t"
2738 "xor %eax,%eax\n\t"
2739 "jmp .Li386_less_unsigned_end\n\t"
2740 ".Li386_less_unsigned_true:\n\t"
2741 "mov $1,%eax\n\t"
2742 ".Li386_less_unsigned_end:\n\t"
2743 "xor %ebx,%ebx\n\t"
2744 "lea 0x8(%esp),%esp");
2745 }
2746
2747 static void
2748 i386_emit_ref (int size)
2749 {
2750 switch (size)
2751 {
2752 case 1:
2753 EMIT_ASM32 (i386_ref1,
2754 "movb (%eax),%al");
2755 break;
2756 case 2:
2757 EMIT_ASM32 (i386_ref2,
2758 "movw (%eax),%ax");
2759 break;
2760 case 4:
2761 EMIT_ASM32 (i386_ref4,
2762 "movl (%eax),%eax");
2763 break;
2764 case 8:
2765 EMIT_ASM32 (i386_ref8,
2766 "movl 4(%eax),%ebx\n\t"
2767 "movl (%eax),%eax");
2768 break;
2769 }
2770 }
2771
2772 static void
2773 i386_emit_if_goto (int *offset_p, int *size_p)
2774 {
2775 EMIT_ASM32 (i386_if_goto,
2776 "mov %eax,%ecx\n\t"
2777 "or %ebx,%ecx\n\t"
2778 "pop %eax\n\t"
2779 "pop %ebx\n\t"
2780 "cmpl $0,%ecx\n\t"
2781 /* Don't trust the assembler to choose the right jump */
2782 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2783
2784 if (offset_p)
2785 *offset_p = 11; /* be sure that this matches the sequence above */
2786 if (size_p)
2787 *size_p = 4;
2788 }
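
/* 11 = 2 (mov %eax,%ecx) + 2 (or %ebx,%ecx) + 1 (pop %eax)
   + 1 (pop %ebx) + 3 (cmpl $0,%ecx) + 2 (the 0x0f 0x85 opcode).  */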
2789
2790 static void
2791 i386_emit_goto (int *offset_p, int *size_p)
2792 {
2793 EMIT_ASM32 (i386_goto,
2794 /* Don't trust the assembler to choose the right jump */
2795 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2796 if (offset_p)
2797 *offset_p = 1;
2798 if (size_p)
2799 *size_p = 4;
2800 }
2801
2802 static void
2803 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2804 {
2805 int diff = (to - (from + size));
2806 unsigned char buf[sizeof (int)];
2807
2808 /* We're only doing 4-byte sizes at the moment. */
2809 if (size != 4)
2810 {
2811 emit_error = 1;
2812 return;
2813 }
2814
2815 memcpy (buf, &diff, sizeof (int));
2816 write_inferior_memory (from, buf, sizeof (int));
2817 }
2818
2819 static void
2820 i386_emit_const (LONGEST num)
2821 {
2822 unsigned char buf[16];
2823 int i, hi, lo;
2824 CORE_ADDR buildaddr = current_insn_ptr;
2825
2826 i = 0;
2827 buf[i++] = 0xb8; /* mov $<n>,%eax */
2828 lo = num & 0xffffffff;
2829 memcpy (&buf[i], &lo, sizeof (lo));
2830 i += 4;
2831 hi = ((num >> 32) & 0xffffffff);
2832 if (hi)
2833 {
2834 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2835 memcpy (&buf[i], &hi, sizeof (hi));
2836 i += 4;
2837 }
2838 else
2839 {
2840 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2841 }
2842 append_insns (&buildaddr, i, buf);
2843 current_insn_ptr = buildaddr;
2844 }
2845
2846 static void
2847 i386_emit_call (CORE_ADDR fn)
2848 {
2849 unsigned char buf[16];
2850 int i, offset;
2851 CORE_ADDR buildaddr;
2852
2853 buildaddr = current_insn_ptr;
2854 i = 0;
2855 buf[i++] = 0xe8; /* call <reladdr> */
2856 offset = ((int) fn) - (buildaddr + 5);
2857 memcpy (buf + 1, &offset, 4);
2858 append_insns (&buildaddr, 5, buf);
2859 current_insn_ptr = buildaddr;
2860 }
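
/* Unlike the amd64 case there is no far-call fallback here: in a
   32-bit address space a call's signed 32-bit displacement (with
   wraparound) can reach any destination, so the 5-byte 0xe8 form
   always suffices.  */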
2861
2862 static void
2863 i386_emit_reg (int reg)
2864 {
2865 unsigned char buf[16];
2866 int i;
2867 CORE_ADDR buildaddr;
2868
2869 EMIT_ASM32 (i386_reg_a,
2870 "sub $0x8,%esp");
2871 buildaddr = current_insn_ptr;
2872 i = 0;
2873 buf[i++] = 0xb8; /* mov $<n>,%eax */
2874 memcpy (&buf[i], &reg, sizeof (reg));
2875 i += 4;
2876 append_insns (&buildaddr, i, buf);
2877 current_insn_ptr = buildaddr;
2878 EMIT_ASM32 (i386_reg_b,
2879 "mov %eax,4(%esp)\n\t"
2880 "mov 8(%ebp),%eax\n\t"
2881 "mov %eax,(%esp)");
2882 i386_emit_call (get_raw_reg_func_addr ());
2883 EMIT_ASM32 (i386_reg_c,
2884 "xor %ebx,%ebx\n\t"
2885 "lea 0x8(%esp),%esp");
2886 }
2887
2888 static void
2889 i386_emit_pop (void)
2890 {
2891 EMIT_ASM32 (i386_pop,
2892 "pop %eax\n\t"
2893 "pop %ebx");
2894 }
2895
2896 static void
2897 i386_emit_stack_flush (void)
2898 {
2899 EMIT_ASM32 (i386_stack_flush,
2900 "push %ebx\n\t"
2901 "push %eax");
2902 }
2903
2904 static void
2905 i386_emit_zero_ext (int arg)
2906 {
2907 switch (arg)
2908 {
2909 case 8:
2910 EMIT_ASM32 (i386_zero_ext_8,
2911 "and $0xff,%eax\n\t"
2912 "xor %ebx,%ebx");
2913 break;
2914 case 16:
2915 EMIT_ASM32 (i386_zero_ext_16,
2916 "and $0xffff,%eax\n\t"
2917 "xor %ebx,%ebx");
2918 break;
2919 case 32:
2920 EMIT_ASM32 (i386_zero_ext_32,
2921 "xor %ebx,%ebx");
2922 break;
2923 default:
2924 emit_error = 1;
2925 }
2926 }
2927
2928 static void
2929 i386_emit_swap (void)
2930 {
2931 EMIT_ASM32 (i386_swap,
2932 "mov %eax,%ecx\n\t"
2933 "mov %ebx,%edx\n\t"
2934 "pop %eax\n\t"
2935 "pop %ebx\n\t"
2936 "push %edx\n\t"
2937 "push %ecx");
2938 }
2939
2940 static void
2941 i386_emit_stack_adjust (int n)
2942 {
2943 unsigned char buf[16];
2944 int i;
2945 CORE_ADDR buildaddr = current_insn_ptr;
2946
2947 i = 0;
2948 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2949 buf[i++] = 0x64;
2950 buf[i++] = 0x24;
/* As in the amd64 version, the one-byte displacement limits N to small values. */
2951 buf[i++] = n * 8;
2952 append_insns (&buildaddr, i, buf);
2953 current_insn_ptr = buildaddr;
2954 }
2955
2956 /* FN's prototype is `LONGEST(*fn)(int)'. */
2957
2958 static void
2959 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2960 {
2961 unsigned char buf[16];
2962 int i;
2963 CORE_ADDR buildaddr;
2964
2965 EMIT_ASM32 (i386_int_call_1_a,
2966 /* Reserve a bit of stack space. */
2967 "sub $0x8,%esp");
2968 /* Put the one argument on the stack. */
2969 buildaddr = current_insn_ptr;
2970 i = 0;
2971 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2972 buf[i++] = 0x04;
2973 buf[i++] = 0x24;
2974 memcpy (&buf[i], &arg1, sizeof (arg1));
2975 i += 4;
2976 append_insns (&buildaddr, i, buf);
2977 current_insn_ptr = buildaddr;
2978 i386_emit_call (fn);
2979 EMIT_ASM32 (i386_int_call_1_c,
2980 "mov %edx,%ebx\n\t"
2981 "lea 0x8(%esp),%esp");
2982 }
2983
2984 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2985
2986 static void
2987 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2988 {
2989 unsigned char buf[16];
2990 int i;
2991 CORE_ADDR buildaddr;
2992
2993 EMIT_ASM32 (i386_void_call_2_a,
2994 /* Preserve %eax only; we don't have to worry about %ebx. */
2995 "push %eax\n\t"
2996 /* Reserve a bit of stack space for arguments. */
2997 "sub $0x10,%esp\n\t"
2998 /* Copy "top" to the second argument position. (Note that
2999 we can't assume the function won't scribble on its
3000 arguments, so don't try to restore from this.) */
3001 "mov %eax,4(%esp)\n\t"
3002 "mov %ebx,8(%esp)");
3003 /* Put the first argument on the stack. */
3004 buildaddr = current_insn_ptr;
3005 i = 0;
3006 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3007 buf[i++] = 0x04;
3008 buf[i++] = 0x24;
3009 memcpy (&buf[i], &arg1, sizeof (arg1));
3010 i += 4;
3011 append_insns (&buildaddr, i, buf);
3012 current_insn_ptr = buildaddr;
3013 i386_emit_call (fn);
3014 EMIT_ASM32 (i386_void_call_2_b,
3015 "lea 0x10(%esp),%esp\n\t"
3016 /* Restore original stack top. */
3017 "pop %eax");
3018 }
3019
3020
3021 void
3022 i386_emit_eq_goto (int *offset_p, int *size_p)
3023 {
3024 EMIT_ASM32 (eq,
3025 /* Check the low half first; it is more likely to be the decider. */
3026 "cmpl %eax,(%esp)\n\t"
3027 "jne .Leq_fallthru\n\t"
3028 "cmpl %ebx,4(%esp)\n\t"
3029 "jne .Leq_fallthru\n\t"
3030 "lea 0x8(%esp),%esp\n\t"
3031 "pop %eax\n\t"
3032 "pop %ebx\n\t"
3033 /* jmp, but don't trust the assembler to choose the right jump */
3034 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3035 ".Leq_fallthru:\n\t"
3036 "lea 0x8(%esp),%esp\n\t"
3037 "pop %eax\n\t"
3038 "pop %ebx");
3039
3040 if (offset_p)
3041 *offset_p = 18;
3042 if (size_p)
3043 *size_p = 4;
3044 }
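
/* For eq/ne the 4-byte displacement sits at offset 18: the compares
   and short jumps total 3 + 2 + 4 + 2 = 11 bytes, then lea (4),
   pop (1), pop (1) and the 0xe9 opcode.  The lt/le/gt/ge variants
   below use a three-jump compare sequence that is two bytes longer,
   putting the displacement at offset 20.  */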
3045
3046 void
3047 i386_emit_ne_goto (int *offset_p, int *size_p)
3048 {
3049 EMIT_ASM32 (ne,
3050 /* Check the low half first; it is more likely to be the decider. */
3051 "cmpl %eax,(%esp)\n\t"
3052 "jne .Lne_jump\n\t"
3053 "cmpl %ebx,4(%esp)\n\t"
3054 "je .Lne_fallthru\n\t"
3055 ".Lne_jump:\n\t"
3056 "lea 0x8(%esp),%esp\n\t"
3057 "pop %eax\n\t"
3058 "pop %ebx\n\t"
3059 /* jmp, but don't trust the assembler to choose the right jump */
3060 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3061 ".Lne_fallthru:\n\t"
3062 "lea 0x8(%esp),%esp\n\t"
3063 "pop %eax\n\t"
3064 "pop %ebx");
3065
3066 if (offset_p)
3067 *offset_p = 18;
3068 if (size_p)
3069 *size_p = 4;
3070 }
3071
3072 void
3073 i386_emit_lt_goto (int *offset_p, int *size_p)
3074 {
3075 EMIT_ASM32 (lt,
3076 "cmpl %ebx,4(%esp)\n\t"
3077 "jl .Llt_jump\n\t"
3078 "jne .Llt_fallthru\n\t"
3079 "cmpl %eax,(%esp)\n\t"
3080 "jnl .Llt_fallthru\n\t"
3081 ".Llt_jump:\n\t"
3082 "lea 0x8(%esp),%esp\n\t"
3083 "pop %eax\n\t"
3084 "pop %ebx\n\t"
3085 /* jmp, but don't trust the assembler to choose the right jump */
3086 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3087 ".Llt_fallthru:\n\t"
3088 "lea 0x8(%esp),%esp\n\t"
3089 "pop %eax\n\t"
3090 "pop %ebx");
3091
3092 if (offset_p)
3093 *offset_p = 20;
3094 if (size_p)
3095 *size_p = 4;
3096 }
3097
3098 void
3099 i386_emit_le_goto (int *offset_p, int *size_p)
3100 {
3101 EMIT_ASM32 (le,
3102 "cmpl %ebx,4(%esp)\n\t"
3103 "jle .Lle_jump\n\t"
3104 "jne .Lle_fallthru\n\t"
3105 "cmpl %eax,(%esp)\n\t"
3106 "jnle .Lle_fallthru\n\t"
3107 ".Lle_jump:\n\t"
3108 "lea 0x8(%esp),%esp\n\t"
3109 "pop %eax\n\t"
3110 "pop %ebx\n\t"
3111 /* jmp, but don't trust the assembler to choose the right jump */
3112 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3113 ".Lle_fallthru:\n\t"
3114 "lea 0x8(%esp),%esp\n\t"
3115 "pop %eax\n\t"
3116 "pop %ebx");
3117
3118 if (offset_p)
3119 *offset_p = 20;
3120 if (size_p)
3121 *size_p = 4;
3122 }
3123
3124 void
3125 i386_emit_gt_goto (int *offset_p, int *size_p)
3126 {
3127 EMIT_ASM32 (gt,
3128 "cmpl %ebx,4(%esp)\n\t"
3129 "jg .Lgt_jump\n\t"
3130 "jne .Lgt_fallthru\n\t"
3131 "cmpl %eax,(%esp)\n\t"
3132 "jng .Lgt_fallthru\n\t"
3133 ".Lgt_jump:\n\t"
3134 "lea 0x8(%esp),%esp\n\t"
3135 "pop %eax\n\t"
3136 "pop %ebx\n\t"
3137 /* jmp, but don't trust the assembler to choose the right jump */
3138 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3139 ".Lgt_fallthru:\n\t"
3140 "lea 0x8(%esp),%esp\n\t"
3141 "pop %eax\n\t"
3142 "pop %ebx");
3143
3144 if (offset_p)
3145 *offset_p = 20;
3146 if (size_p)
3147 *size_p = 4;
3148 }
3149
3150 void
3151 i386_emit_ge_goto (int *offset_p, int *size_p)
3152 {
3153 EMIT_ASM32 (ge,
3154 "cmpl %ebx,4(%esp)\n\t"
3155 "jge .Lge_jump\n\t"
3156 "jne .Lge_fallthru\n\t"
3157 "cmpl %eax,(%esp)\n\t"
3158 "jnge .Lge_fallthru\n\t"
3159 ".Lge_jump:\n\t"
3160 "lea 0x8(%esp),%esp\n\t"
3161 "pop %eax\n\t"
3162 "pop %ebx\n\t"
3163 /* jmp, but don't trust the assembler to choose the right jump */
3164 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3165 ".Lge_fallthru:\n\t"
3166 "lea 0x8(%esp),%esp\n\t"
3167 "pop %eax\n\t"
3168 "pop %ebx");
3169
3170 if (offset_p)
3171 *offset_p = 20;
3172 if (size_p)
3173 *size_p = 4;
3174 }
3175
3176 struct emit_ops i386_emit_ops =
3177 {
3178 i386_emit_prologue,
3179 i386_emit_epilogue,
3180 i386_emit_add,
3181 i386_emit_sub,
3182 i386_emit_mul,
3183 i386_emit_lsh,
3184 i386_emit_rsh_signed,
3185 i386_emit_rsh_unsigned,
3186 i386_emit_ext,
3187 i386_emit_log_not,
3188 i386_emit_bit_and,
3189 i386_emit_bit_or,
3190 i386_emit_bit_xor,
3191 i386_emit_bit_not,
3192 i386_emit_equal,
3193 i386_emit_less_signed,
3194 i386_emit_less_unsigned,
3195 i386_emit_ref,
3196 i386_emit_if_goto,
3197 i386_emit_goto,
3198 i386_write_goto_address,
3199 i386_emit_const,
3200 i386_emit_call,
3201 i386_emit_reg,
3202 i386_emit_pop,
3203 i386_emit_stack_flush,
3204 i386_emit_zero_ext,
3205 i386_emit_swap,
3206 i386_emit_stack_adjust,
3207 i386_emit_int_call_1,
3208 i386_emit_void_call_2,
3209 i386_emit_eq_goto,
3210 i386_emit_ne_goto,
3211 i386_emit_lt_goto,
3212 i386_emit_le_goto,
3213 i386_emit_gt_goto,
3214 i386_emit_ge_goto
3215 };
3216
3217
3218 static struct emit_ops *
3219 x86_emit_ops (void)
3220 {
3221 #ifdef __x86_64__
3222 if (is_64bit_tdesc ())
3223 return &amd64_emit_ops;
3224 else
3225 #endif
3226 return &i386_emit_ops;
3227 }
3228
3229 static int
3230 x86_supports_range_stepping (void)
3231 {
3232 return 1;
3233 }
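
/* Range stepping (the vCont;r packet) lets GDB ask the stub to keep
   single-stepping while the PC stays within a given range, avoiding a
   remote round trip per instruction; all x86 targets can do it, hence
   the unconditional 1.  */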
3234
3235 /* This is initialized assuming an amd64 target.
3236 x86_arch_setup will correct it for i386 or amd64 targets. */
3237
3238 struct linux_target_ops the_low_target =
3239 {
3240 x86_arch_setup,
3241 x86_linux_regs_info,
3242 x86_cannot_fetch_register,
3243 x86_cannot_store_register,
3244 NULL, /* fetch_register */
3245 x86_get_pc,
3246 x86_set_pc,
3247 x86_breakpoint,
3248 x86_breakpoint_len,
3249 NULL, /* breakpoint_reinsert_addr */
3250 1, /* decr_pc_after_break */
3251 x86_breakpoint_at,
3252 x86_supports_z_point_type,
3253 x86_insert_point,
3254 x86_remove_point,
3255 x86_stopped_by_watchpoint,
3256 x86_stopped_data_address,
3257 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3258 native i386 case (no registers smaller than an xfer unit), and are not
3259 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3260 NULL,
3261 NULL,
3262 /* need to fix up i386 siginfo if host is amd64 */
3263 x86_siginfo_fixup,
3264 x86_linux_new_process,
3265 x86_linux_new_thread,
3266 x86_linux_prepare_to_resume,
3267 x86_linux_process_qsupported,
3268 x86_supports_tracepoints,
3269 x86_get_thread_area,
3270 x86_install_fast_tracepoint_jump_pad,
3271 x86_emit_ops,
3272 x86_get_min_fast_tracepoint_insn_len,
3273 x86_supports_range_stepping,
3274 };
3275
3276 void
3277 initialize_low_arch (void)
3278 {
3279 /* Initialize the Linux target descriptions. */
3280 #ifdef __x86_64__
3281 init_registers_amd64_linux ();
3282 init_registers_amd64_avx_linux ();
3283 init_registers_amd64_avx512_linux ();
3284 init_registers_amd64_mpx_linux ();
3285
3286 init_registers_x32_linux ();
3287 init_registers_x32_avx_linux ();
3288 init_registers_x32_avx512_linux ();
3289
3290 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3291 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3292 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3293 #endif
3294 init_registers_i386_linux ();
3295 init_registers_i386_mmx_linux ();
3296 init_registers_i386_avx_linux ();
3297 init_registers_i386_avx512_linux ();
3298 init_registers_i386_mpx_linux ();
3299
3300 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3301 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3302 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3303
3304 initialize_regsets_info (&x86_regsets_info);
3305 }