/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

/* Template of a 5-byte "jmp rel32"; the 32-bit relative offset is
   patched in over the zero bytes later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* Template of a 4-byte "jmp rel16" (operand-size prefix + e9); the
   16-bit relative offset is patched in later.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
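
/* Illustrative note (an assumption, not stated in this file): the
   leading '@' marks these strings as literal target description
   documents to be handed straight back for qXfer:features reads,
   rather than the name of an XML annex to look up on disk.  */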

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
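
/* Illustrative note (not part of the original sources): when issued
   through ptrace, PTRACE_ARCH_PRCTL takes the out-buffer in the
   "addr" argument and the ARCH_* sub-code in the "data" argument:

     unsigned long base;
     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   This is the reverse of the argument order of the arch_prctl system
   call itself; the calls further down rely on this convention.  */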

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                  /* MPX registers BND0 ... BND3.  */
  -1, -1,                          /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* GS holds a segment selector; shift
                                      off its low 3 bits (RPL/TI) to get
                                      the GDT entry index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
i386_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_inferior);

  /* DR6 and DR7 are retrieved some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
i386_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
i386_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

static unsigned long
i386_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Low-level function vector.  */
struct i386_dr_low_type i386_dr_low =
  {
    i386_dr_low_set_control,
    i386_dr_low_set_addr,
    i386_dr_low_get_addr,
    i386_dr_low_get_status,
    i386_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct i386_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return i386_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct i386_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return i386_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                    &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit, aligned to 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
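
/* Illustrative sketch (not part of the original sources) of how a
   caller might use the fixup when shuttling a 32-bit inferior's
   siginfo through ptrace; LWPID is a hypothetical thread id:

     siginfo_t native;
     char inf_buf[sizeof (siginfo_t)];
     ptrace (PTRACE_GETSIGINFO, lwpid, 0, &native);
     if (x86_siginfo_fixup (&native, inf_buf, 0))
       ...   // inf_buf now holds the inferior's compat layout.

   Direction 1 performs the reverse copy, e.g. before
   PTRACE_SETSIGINFO.  */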
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the OS
  enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
  can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
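
/* For illustration (not part of the original file): given a raw XSAVE
   block fetched via PTRACE_GETREGSET with NT_X86_XSTATE, the
   OS-enabled feature mask can be pulled out of the sw_usable area as
   the code further down does:

     uint64_t xstateregs[I386_XSTATE_SSE_SIZE / sizeof (uint64_t)];
     ...
     uint64_t xcr0
       = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
*/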

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   Tri-state: -1 means not yet determined.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = I386_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case I386_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case I386_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case I386_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & I386_XSTATE_ALL_MASK)
            {
            case (I386_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (I386_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (I386_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_inferior
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */
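/* For example (illustrative values, not from the original sources), a
   GDB that understands x86 XML target descriptions might send a
   qSupported packet containing "xmlRegisters=i386", in which case
   QUERY below would be the string "xmlRegisters=i386".  */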

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
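
/* Illustrative example (not part of the original file): push_opcode
   parses a string of hex byte values and appends the raw bytes, so

     unsigned char insn[16];
     int n = push_opcode (insn, "48 89 e6");   // mov %rsp,%rsi

   leaves n == 3 with insn[] = { 0x48, 0x89, 0xe6 }.  Parsing stops at
   the first token strtoul cannot consume.  */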

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that jumps to the jump pad
   into JJUMP_PAD_INSN.  The caller is responsible for writing it in
   at the tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi (48 bf + imm64) */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction that jumps to the jump pad
   into JJUMP_PAD_INSN.  The caller is responsible for writing it in
   at the tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */
1869
1870 i = 0;
1871 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1872 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1873 %esp,<lockaddr> */
1874 memcpy (&buf[i], (void *) &lockaddr, 4);
1875 i += 4;
1876 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1877 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1878 append_insns (&buildaddr, i, buf);
1879
1880
1881 /* Set up arguments to the gdb_collect call. */
1882 i = 0;
1883 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1884 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1885 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1886 append_insns (&buildaddr, i, buf);
1887
1888 i = 0;
1889 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1890 append_insns (&buildaddr, i, buf);
1891
1892 i = 0;
1893 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1894 memcpy (&buf[i], (void *) &tpoint, 4);
1895 i += 4;
1896 append_insns (&buildaddr, i, buf);
1897
1898 buf[0] = 0xe8; /* call <reladdr> */
1899 offset = collector - (buildaddr + sizeof (jump_insn));
1900 memcpy (buf + 1, &offset, 4);
1901 append_insns (&buildaddr, 5, buf);
1902 /* Clean up after the call. */
1903 buf[0] = 0x83; /* add $0x8,%esp */
1904 buf[1] = 0xc4;
1905 buf[2] = 0x08;
1906 append_insns (&buildaddr, 3, buf);
1907
1908
1909 /* Clear the spin-lock. This would need the LOCK prefix on older
1910 broken archs. */
1911 i = 0;
1912 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1913 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1914 memcpy (buf + i, &lockaddr, 4);
1915 i += 4;
1916 append_insns (&buildaddr, i, buf);
1917
1918
1919 /* Remove stack that had been used for the collect_t object. */
1920 i = 0;
1921 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1922 append_insns (&buildaddr, i, buf);
1923
1924 i = 0;
1925 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1926 buf[i++] = 0xc4;
1927 buf[i++] = 0x04;
1928 buf[i++] = 0x17; /* pop %ss */
1929 buf[i++] = 0x0f; /* pop %gs */
1930 buf[i++] = 0xa9;
1931 buf[i++] = 0x0f; /* pop %fs */
1932 buf[i++] = 0xa1;
1933 buf[i++] = 0x07; /* pop %es */
1934 buf[i++] = 0x1f; /* pop %ds */
1935 buf[i++] = 0x9d; /* popf */
1936 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1937 buf[i++] = 0xc4;
1938 buf[i++] = 0x04;
1939 buf[i++] = 0x61; /* popad */
1940 append_insns (&buildaddr, i, buf);
1941
1942 /* Now, adjust the original instruction to execute in the jump
1943 pad. */
1944 *adjusted_insn_addr = buildaddr;
1945 relocate_instruction (&buildaddr, tpaddr);
1946 *adjusted_insn_addr_end = buildaddr;
1947
1948 /* Write the jump back to the program. */
1949 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1950 memcpy (buf, jump_insn, sizeof (jump_insn));
1951 memcpy (buf + 1, &offset, 4);
1952 append_insns (&buildaddr, sizeof (jump_insn), buf);
1953
1954 /* The jump pad is now built. Wire in a jump to our jump pad. This
1955 is always done last (by our caller actually), so that we can
1956 install fast tracepoints with threads running. This relies on
1957 the agent's atomic write support. */
1958 if (orig_size == 4)
1959 {
1960 /* Create a trampoline. */
1961 *trampoline_size = sizeof (jump_insn);
1962 if (!claim_trampoline_space (*trampoline_size, trampoline))
1963 {
1964 /* No trampoline space available. */
1965 strcpy (err,
1966 "E.Cannot allocate trampoline space needed for fast "
1967 "tracepoints on 4-byte instructions.");
1968 return 1;
1969 }
1970
1971 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1972 memcpy (buf, jump_insn, sizeof (jump_insn));
1973 memcpy (buf + 1, &offset, 4);
1974 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1975
1976 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1977 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1978 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1979 memcpy (buf + 2, &offset, 2);
1980 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1981 *jjump_pad_insn_size = sizeof (small_jump_insn);
1982 }
1983 else
1984 {
1985 /* Else use a 32-bit relative jump instruction. */
1986 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1987 memcpy (buf, jump_insn, sizeof (jump_insn));
1988 memcpy (buf + 1, &offset, 4);
1989 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1990 *jjump_pad_insn_size = sizeof (jump_insn);
1991 }
1992
1993 /* Return the end address of our pad. */
1994 *jump_entry = buildaddr;
1995
1996 return 0;
1997 }
1998
1999 static int
2000 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2001 CORE_ADDR collector,
2002 CORE_ADDR lockaddr,
2003 ULONGEST orig_size,
2004 CORE_ADDR *jump_entry,
2005 CORE_ADDR *trampoline,
2006 ULONGEST *trampoline_size,
2007 unsigned char *jjump_pad_insn,
2008 ULONGEST *jjump_pad_insn_size,
2009 CORE_ADDR *adjusted_insn_addr,
2010 CORE_ADDR *adjusted_insn_addr_end,
2011 char *err)
2012 {
2013 #ifdef __x86_64__
2014 if (is_64bit_tdesc ())
2015 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2016 collector, lockaddr,
2017 orig_size, jump_entry,
2018 trampoline, trampoline_size,
2019 jjump_pad_insn,
2020 jjump_pad_insn_size,
2021 adjusted_insn_addr,
2022 adjusted_insn_addr_end,
2023 err);
2024 #endif
2025
2026 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2027 collector, lockaddr,
2028 orig_size, jump_entry,
2029 trampoline, trampoline_size,
2030 jjump_pad_insn,
2031 jjump_pad_insn_size,
2032 adjusted_insn_addr,
2033 adjusted_insn_addr_end,
2034 err);
2035 }
2036
2037 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2038 architectures. */
2039
2040 static int
2041 x86_get_min_fast_tracepoint_insn_len (void)
2042 {
2043 static int warned_about_fast_tracepoints = 0;
2044
2045 #ifdef __x86_64__
2046 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2047 used for fast tracepoints. */
2048 if (is_64bit_tdesc ())
2049 return 5;
2050 #endif
2051
2052 if (agent_loaded_p ())
2053 {
2054 char errbuf[IPA_BUFSIZ];
2055
2056 errbuf[0] = '\0';
2057
2058       /* On x86, if trampolines are available, then 4-byte jump instructions
2059 	 with a 2-byte offset may be used; otherwise, 5-byte jump instructions
2060 	 with a 4-byte offset are used instead.  */
2061 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2062 return 4;
2063 else
2064 {
2065 	  /* GDB has no channel to explain to the user why a shorter fast
2066 tracepoint is not possible, but at least make GDBserver
2067 mention that something has gone awry. */
2068 if (!warned_about_fast_tracepoints)
2069 {
2070 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2071 warned_about_fast_tracepoints = 1;
2072 }
2073 return 5;
2074 }
2075 }
2076 else
2077 {
2078 /* Indicate that the minimum length is currently unknown since the IPA
2079 has not loaded yet. */
2080 return 0;
2081 }
2082 }
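
/* For reference, the two encodings behind the 4/5 answers above (a
   sketch; jump_insn and small_jump_insn are defined near the top of
   this file, and the buf + 2 copy earlier implies a two-byte opcode):

     e9 xx xx xx xx    jmp rel32 -- 5 bytes, reaches anywhere
     66 e9 xx xx       jmp rel16 -- 4 bytes; the 16-bit operand size
                       truncates %eip, so the target trampoline must
                       sit in the low 64K of the inferior's address
                       space, hence the trampoline-buffer check.  */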
2083
2084 static void
2085 add_insns (unsigned char *start, int len)
2086 {
2087 CORE_ADDR buildaddr = current_insn_ptr;
2088
2089 if (debug_threads)
2090 debug_printf ("Adding %d bytes of insn at %s\n",
2091 len, paddress (buildaddr));
2092
2093 append_insns (&buildaddr, len, start);
2094 current_insn_ptr = buildaddr;
2095 }
2096
2097 /* Our general strategy for emitting code is to avoid specifying raw
2098 bytes whenever possible, and instead copy a block of inline asm
2099 that is embedded in the function. This is a little messy, because
2100 we need to keep the compiler from discarding what looks like dead
2101 code, plus suppress various warnings. */
2102
2103 #define EMIT_ASM(NAME, INSNS) \
2104 do \
2105 { \
2106 extern unsigned char start_ ## NAME, end_ ## NAME; \
2107 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2108 __asm__ ("jmp end_" #NAME "\n" \
2109 "\t" "start_" #NAME ":" \
2110 "\t" INSNS "\n" \
2111 "\t" "end_" #NAME ":"); \
2112 } while (0)
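
/* For illustration, a use such as EMIT_ASM (amd64_pop, "pop %rax")
   (a real call further below) expands to roughly:

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\t" "start_amd64_pop:" "\t" "pop %rax" "\n"
	      "\t" "end_amd64_pop:");

   The asm statement assembles the template into this function's own
   text, bracketed by the two labels; add_insns then copies those bytes
   into the jump pad, and the leading jmp keeps gdbserver from ever
   executing the template itself.  */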
2113
2114 #ifdef __x86_64__
2115
2116 #define EMIT_ASM32(NAME,INSNS) \
2117 do \
2118 { \
2119 extern unsigned char start_ ## NAME, end_ ## NAME; \
2120 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2121 __asm__ (".code32\n" \
2122 "\t" "jmp end_" #NAME "\n" \
2123 "\t" "start_" #NAME ":\n" \
2124 "\t" INSNS "\n" \
2125 "\t" "end_" #NAME ":\n" \
2126 ".code64\n"); \
2127 } while (0)
2128
2129 #else
2130
2131 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2132
2133 #endif
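
/* The .code32/.code64 bracketing above makes a 64-bit-built gdbserver
   assemble the template as 32-bit instructions for a 32-bit inferior;
   on a 32-bit build, EMIT_ASM32 is simply an alias for EMIT_ASM.  */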
2134
2135 #ifdef __x86_64__
2136
2137 static void
2138 amd64_emit_prologue (void)
2139 {
2140 EMIT_ASM (amd64_prologue,
2141 "pushq %rbp\n\t"
2142 "movq %rsp,%rbp\n\t"
2143 "sub $0x20,%rsp\n\t"
2144 "movq %rdi,-8(%rbp)\n\t"
2145 "movq %rsi,-16(%rbp)");
2146 }
2147
2148
2149 static void
2150 amd64_emit_epilogue (void)
2151 {
2152 EMIT_ASM (amd64_epilogue,
2153 "movq -16(%rbp),%rdi\n\t"
2154 "movq %rax,(%rdi)\n\t"
2155 "xor %rax,%rax\n\t"
2156 "leave\n\t"
2157 "ret");
2158 }
2159
2160 static void
2161 amd64_emit_add (void)
2162 {
2163 EMIT_ASM (amd64_add,
2164 "add (%rsp),%rax\n\t"
2165 "lea 0x8(%rsp),%rsp");
2166 }
2167
2168 static void
2169 amd64_emit_sub (void)
2170 {
2171 EMIT_ASM (amd64_sub,
2172 "sub %rax,(%rsp)\n\t"
2173 "pop %rax");
2174 }
2175
2176 static void
2177 amd64_emit_mul (void)
2178 {
2179 emit_error = 1;
2180 }
2181
2182 static void
2183 amd64_emit_lsh (void)
2184 {
2185 emit_error = 1;
2186 }
2187
2188 static void
2189 amd64_emit_rsh_signed (void)
2190 {
2191 emit_error = 1;
2192 }
2193
2194 static void
2195 amd64_emit_rsh_unsigned (void)
2196 {
2197 emit_error = 1;
2198 }
2199
2200 static void
2201 amd64_emit_ext (int arg)
2202 {
2203 switch (arg)
2204 {
2205 case 8:
2206 EMIT_ASM (amd64_ext_8,
2207 "cbtw\n\t"
2208 "cwtl\n\t"
2209 "cltq");
2210 break;
2211 case 16:
2212 EMIT_ASM (amd64_ext_16,
2213 "cwtl\n\t"
2214 "cltq");
2215 break;
2216 case 32:
2217 EMIT_ASM (amd64_ext_32,
2218 "cltq");
2219 break;
2220 default:
2221 emit_error = 1;
2222 }
2223 }
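
/* The AT&T mnemonics above chain sign extensions: cbtw widens %al to
   %ax, cwtl widens %ax to %eax, and cltq widens %eax to %rax, so
   e.g. the 8-bit case sign-extends a byte all the way to the full
   64-bit %rax.  */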
2224
2225 static void
2226 amd64_emit_log_not (void)
2227 {
2228 EMIT_ASM (amd64_log_not,
2229 "test %rax,%rax\n\t"
2230 "sete %cl\n\t"
2231 "movzbq %cl,%rax");
2232 }
2233
2234 static void
2235 amd64_emit_bit_and (void)
2236 {
2237 EMIT_ASM (amd64_and,
2238 "and (%rsp),%rax\n\t"
2239 "lea 0x8(%rsp),%rsp");
2240 }
2241
2242 static void
2243 amd64_emit_bit_or (void)
2244 {
2245 EMIT_ASM (amd64_or,
2246 "or (%rsp),%rax\n\t"
2247 "lea 0x8(%rsp),%rsp");
2248 }
2249
2250 static void
2251 amd64_emit_bit_xor (void)
2252 {
2253 EMIT_ASM (amd64_xor,
2254 "xor (%rsp),%rax\n\t"
2255 "lea 0x8(%rsp),%rsp");
2256 }
2257
2258 static void
2259 amd64_emit_bit_not (void)
2260 {
2261 EMIT_ASM (amd64_bit_not,
2262 "xorq $0xffffffffffffffff,%rax");
2263 }
2264
2265 static void
2266 amd64_emit_equal (void)
2267 {
2268 EMIT_ASM (amd64_equal,
2269 "cmp %rax,(%rsp)\n\t"
2270 "je .Lamd64_equal_true\n\t"
2271 "xor %rax,%rax\n\t"
2272 "jmp .Lamd64_equal_end\n\t"
2273 ".Lamd64_equal_true:\n\t"
2274 "mov $0x1,%rax\n\t"
2275 ".Lamd64_equal_end:\n\t"
2276 "lea 0x8(%rsp),%rsp");
2277 }
2278
2279 static void
2280 amd64_emit_less_signed (void)
2281 {
2282 EMIT_ASM (amd64_less_signed,
2283 "cmp %rax,(%rsp)\n\t"
2284 "jl .Lamd64_less_signed_true\n\t"
2285 "xor %rax,%rax\n\t"
2286 "jmp .Lamd64_less_signed_end\n\t"
2287 ".Lamd64_less_signed_true:\n\t"
2288 "mov $1,%rax\n\t"
2289 ".Lamd64_less_signed_end:\n\t"
2290 "lea 0x8(%rsp),%rsp");
2291 }
2292
2293 static void
2294 amd64_emit_less_unsigned (void)
2295 {
2296 EMIT_ASM (amd64_less_unsigned,
2297 "cmp %rax,(%rsp)\n\t"
2298 "jb .Lamd64_less_unsigned_true\n\t"
2299 "xor %rax,%rax\n\t"
2300 "jmp .Lamd64_less_unsigned_end\n\t"
2301 ".Lamd64_less_unsigned_true:\n\t"
2302 "mov $1,%rax\n\t"
2303 ".Lamd64_less_unsigned_end:\n\t"
2304 "lea 0x8(%rsp),%rsp");
2305 }
2306
2307 static void
2308 amd64_emit_ref (int size)
2309 {
2310 switch (size)
2311 {
2312 case 1:
2313 EMIT_ASM (amd64_ref1,
2314 "movb (%rax),%al");
2315 break;
2316 case 2:
2317 EMIT_ASM (amd64_ref2,
2318 "movw (%rax),%ax");
2319 break;
2320 case 4:
2321 EMIT_ASM (amd64_ref4,
2322 "movl (%rax),%eax");
2323 break;
2324 case 8:
2325 EMIT_ASM (amd64_ref8,
2326 "movq (%rax),%rax");
2327 break;
2328 }
2329 }
2330
2331 static void
2332 amd64_emit_if_goto (int *offset_p, int *size_p)
2333 {
2334 EMIT_ASM (amd64_if_goto,
2335 "mov %rax,%rcx\n\t"
2336 "pop %rax\n\t"
2337 "cmp $0,%rcx\n\t"
2338 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2339 if (offset_p)
2340 *offset_p = 10;
2341 if (size_p)
2342 *size_p = 4;
2343 }
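
/* A sketch of the byte accounting behind *offset_p = 10 above:

     48 89 c1            mov %rax,%rcx    3 bytes
     58                  pop %rax         1 byte
     48 83 f9 00         cmp $0x0,%rcx    4 bytes
     0f 85 xx xx xx xx   jne rel32        2 opcode bytes, then the
                                          4-byte displacement at
                                          offset 10

   amd64_write_goto_address below patches that displacement.  */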
2344
2345 static void
2346 amd64_emit_goto (int *offset_p, int *size_p)
2347 {
2348 EMIT_ASM (amd64_goto,
2349 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2350 if (offset_p)
2351 *offset_p = 1;
2352 if (size_p)
2353 *size_p = 4;
2354 }
2355
2356 static void
2357 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2358 {
2359 int diff = (to - (from + size));
2360 unsigned char buf[sizeof (int)];
2361
2362 if (size != 4)
2363 {
2364 emit_error = 1;
2365 return;
2366 }
2367
2368 memcpy (buf, &diff, sizeof (int));
2369 write_inferior_memory (from, buf, sizeof (int));
2370 }
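
/* For example (hypothetical addresses), patching a goto whose 4-byte
   displacement field starts at from = 0x1000 to reach to = 0x1080
   writes diff = 0x1080 - (0x1000 + 4) = 0x7c, because the processor
   treats the displacement as relative to the first byte after it.  */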
2371
2372 static void
2373 amd64_emit_const (LONGEST num)
2374 {
2375 unsigned char buf[16];
2376 int i;
2377 CORE_ADDR buildaddr = current_insn_ptr;
2378
2379 i = 0;
2380 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2381 memcpy (&buf[i], &num, sizeof (num));
2382 i += 8;
2383 append_insns (&buildaddr, i, buf);
2384 current_insn_ptr = buildaddr;
2385 }
2386
2387 static void
2388 amd64_emit_call (CORE_ADDR fn)
2389 {
2390 unsigned char buf[16];
2391 int i;
2392 CORE_ADDR buildaddr;
2393 LONGEST offset64;
2394
2395   /* The destination function, being in the shared library, may be
2396      more than 31 bits away from the compiled code pad.  */
2397
2398 buildaddr = current_insn_ptr;
2399
2400 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2401
2402 i = 0;
2403
2404 if (offset64 > INT_MAX || offset64 < INT_MIN)
2405 {
2406       /* Offset is too large for a direct call.  Use an indirect callq
2407 	 through a register instead; %rdx is call-clobbered, so we
2408 	 don't have to push/pop it.  */
2409       buf[i++] = 0x48; /* movabs $fn,%rdx */
2410       buf[i++] = 0xba;
2411       memcpy (buf + i, &fn, 8);
2412       i += 8;
2413       buf[i++] = 0xff; /* callq *%rdx */
2414       buf[i++] = 0xd2;
2415 }
2416 else
2417 {
2418       int offset32 = offset64; /* we know we can't overflow here.  */
      buf[i++] = 0xe8; /* call <reladdr> */
2419       memcpy (buf + i, &offset32, 4);
2420       i += 4;
2421 }
2422
2423 append_insns (&buildaddr, i, buf);
2424 current_insn_ptr = buildaddr;
2425 }
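
/* For reference, the two call sequences emitted above:

     e8 xx xx xx xx                   call rel32        (near case)

     48 ba xx xx xx xx xx xx xx xx    movabs $fn,%rdx   (far case)
     ff d2                            callq *%rdx

   rel32 is relative to the end of the 5-byte call instruction, which
   is why offset64 subtracts buildaddr + 1 + 4.  */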
2426
2427 static void
2428 amd64_emit_reg (int reg)
2429 {
2430 unsigned char buf[16];
2431 int i;
2432 CORE_ADDR buildaddr;
2433
2434 /* Assume raw_regs is still in %rdi. */
2435 buildaddr = current_insn_ptr;
2436 i = 0;
2437 buf[i++] = 0xbe; /* mov $<n>,%esi */
2438 memcpy (&buf[i], &reg, sizeof (reg));
2439 i += 4;
2440 append_insns (&buildaddr, i, buf);
2441 current_insn_ptr = buildaddr;
2442 amd64_emit_call (get_raw_reg_func_addr ());
2443 }
2444
2445 static void
2446 amd64_emit_pop (void)
2447 {
2448 EMIT_ASM (amd64_pop,
2449 "pop %rax");
2450 }
2451
2452 static void
2453 amd64_emit_stack_flush (void)
2454 {
2455 EMIT_ASM (amd64_stack_flush,
2456 "push %rax");
2457 }
2458
2459 static void
2460 amd64_emit_zero_ext (int arg)
2461 {
2462 switch (arg)
2463 {
2464 case 8:
2465 EMIT_ASM (amd64_zero_ext_8,
2466 "and $0xff,%rax");
2467 break;
2468 case 16:
2469 EMIT_ASM (amd64_zero_ext_16,
2470 "and $0xffff,%rax");
2471 break;
2472 case 32:
2473 EMIT_ASM (amd64_zero_ext_32,
2474 "mov $0xffffffff,%rcx\n\t"
2475 "and %rcx,%rax");
2476 break;
2477 default:
2478 emit_error = 1;
2479 }
2480 }
2481
2482 static void
2483 amd64_emit_swap (void)
2484 {
2485 EMIT_ASM (amd64_swap,
2486 "mov %rax,%rcx\n\t"
2487 "pop %rax\n\t"
2488 "push %rcx");
2489 }
2490
2491 static void
2492 amd64_emit_stack_adjust (int n)
2493 {
2494 unsigned char buf[16];
2495 int i;
2496 CORE_ADDR buildaddr = current_insn_ptr;
2497
2498 i = 0;
2499 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2500 buf[i++] = 0x8d;
2501 buf[i++] = 0x64;
2502 buf[i++] = 0x24;
2503 /* This only handles adjustments up to 16, but we don't expect any more. */
2504 buf[i++] = n * 8;
2505 append_insns (&buildaddr, i, buf);
2506 current_insn_ptr = buildaddr;
2507 }
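
/* The bytes above encode lea disp8(%rsp),%rsp; e.g. n = 2 yields
   48 8d 64 24 10, i.e. lea 0x10(%rsp),%rsp, discarding two 8-byte
   stack slots without touching the flags.  The displacement is a
   single signed byte, which bounds the adjustments this handles.  */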
2508
2509 /* FN's prototype is `LONGEST(*fn)(int)'. */
2510
2511 static void
2512 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2513 {
2514 unsigned char buf[16];
2515 int i;
2516 CORE_ADDR buildaddr;
2517
2518 buildaddr = current_insn_ptr;
2519 i = 0;
2520 buf[i++] = 0xbf; /* movl $<n>,%edi */
2521 memcpy (&buf[i], &arg1, sizeof (arg1));
2522 i += 4;
2523 append_insns (&buildaddr, i, buf);
2524 current_insn_ptr = buildaddr;
2525 amd64_emit_call (fn);
2526 }
2527
2528 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2529
2530 static void
2531 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2532 {
2533 unsigned char buf[16];
2534 int i;
2535 CORE_ADDR buildaddr;
2536
2537 buildaddr = current_insn_ptr;
2538 i = 0;
2539 buf[i++] = 0xbf; /* movl $<n>,%edi */
2540 memcpy (&buf[i], &arg1, sizeof (arg1));
2541 i += 4;
2542 append_insns (&buildaddr, i, buf);
2543 current_insn_ptr = buildaddr;
2544 EMIT_ASM (amd64_void_call_2_a,
2545 /* Save away a copy of the stack top. */
2546 "push %rax\n\t"
2547 /* Also pass top as the second argument. */
2548 "mov %rax,%rsi");
2549 amd64_emit_call (fn);
2550 EMIT_ASM (amd64_void_call_2_b,
2551 /* Restore the stack top, %rax may have been trashed. */
2552 "pop %rax");
2553 }
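
/* A note on the convention assumed by the two call helpers above: in
   the SysV AMD64 ABI the first two integer arguments travel in %rdi
   and %rsi, so the movl into %edi supplies ARG1 and the mov %rax,%rsi
   passes the bytecode top-of-stack as the LONGEST second argument.  */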
2554
2555 void
2556 amd64_emit_eq_goto (int *offset_p, int *size_p)
2557 {
2558 EMIT_ASM (amd64_eq,
2559 "cmp %rax,(%rsp)\n\t"
2560 "jne .Lamd64_eq_fallthru\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2562 "pop %rax\n\t"
2563 /* jmp, but don't trust the assembler to choose the right jump */
2564 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2565 ".Lamd64_eq_fallthru:\n\t"
2566 "lea 0x8(%rsp),%rsp\n\t"
2567 "pop %rax");
2568
2569 if (offset_p)
2570 *offset_p = 13;
2571 if (size_p)
2572 *size_p = 4;
2573 }
2574
2575 void
2576 amd64_emit_ne_goto (int *offset_p, int *size_p)
2577 {
2578 EMIT_ASM (amd64_ne,
2579 "cmp %rax,(%rsp)\n\t"
2580 "je .Lamd64_ne_fallthru\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2582 "pop %rax\n\t"
2583 /* jmp, but don't trust the assembler to choose the right jump */
2584 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2585 ".Lamd64_ne_fallthru:\n\t"
2586 "lea 0x8(%rsp),%rsp\n\t"
2587 "pop %rax");
2588
2589 if (offset_p)
2590 *offset_p = 13;
2591 if (size_p)
2592 *size_p = 4;
2593 }
2594
2595 void
2596 amd64_emit_lt_goto (int *offset_p, int *size_p)
2597 {
2598 EMIT_ASM (amd64_lt,
2599 "cmp %rax,(%rsp)\n\t"
2600 "jnl .Lamd64_lt_fallthru\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2602 "pop %rax\n\t"
2603 /* jmp, but don't trust the assembler to choose the right jump */
2604 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2605 ".Lamd64_lt_fallthru:\n\t"
2606 "lea 0x8(%rsp),%rsp\n\t"
2607 "pop %rax");
2608
2609 if (offset_p)
2610 *offset_p = 13;
2611 if (size_p)
2612 *size_p = 4;
2613 }
2614
2615 void
2616 amd64_emit_le_goto (int *offset_p, int *size_p)
2617 {
2618 EMIT_ASM (amd64_le,
2619 "cmp %rax,(%rsp)\n\t"
2620 "jnle .Lamd64_le_fallthru\n\t"
2621 "lea 0x8(%rsp),%rsp\n\t"
2622 "pop %rax\n\t"
2623 /* jmp, but don't trust the assembler to choose the right jump */
2624 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2625 ".Lamd64_le_fallthru:\n\t"
2626 "lea 0x8(%rsp),%rsp\n\t"
2627 "pop %rax");
2628
2629 if (offset_p)
2630 *offset_p = 13;
2631 if (size_p)
2632 *size_p = 4;
2633 }
2634
2635 void
2636 amd64_emit_gt_goto (int *offset_p, int *size_p)
2637 {
2638 EMIT_ASM (amd64_gt,
2639 "cmp %rax,(%rsp)\n\t"
2640 "jng .Lamd64_gt_fallthru\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2642 "pop %rax\n\t"
2643 /* jmp, but don't trust the assembler to choose the right jump */
2644 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2645 ".Lamd64_gt_fallthru:\n\t"
2646 "lea 0x8(%rsp),%rsp\n\t"
2647 "pop %rax");
2648
2649 if (offset_p)
2650 *offset_p = 13;
2651 if (size_p)
2652 *size_p = 4;
2653 }
2654
2655 void
2656 amd64_emit_ge_goto (int *offset_p, int *size_p)
2657 {
2658 EMIT_ASM (amd64_ge,
2659 "cmp %rax,(%rsp)\n\t"
2660 "jnge .Lamd64_ge_fallthru\n\t"
2661 ".Lamd64_ge_jump:\n\t"
2662 "lea 0x8(%rsp),%rsp\n\t"
2663 "pop %rax\n\t"
2664 /* jmp, but don't trust the assembler to choose the right jump */
2665 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2666 ".Lamd64_ge_fallthru:\n\t"
2667 "lea 0x8(%rsp),%rsp\n\t"
2668 "pop %rax");
2669
2670 if (offset_p)
2671 *offset_p = 13;
2672 if (size_p)
2673 *size_p = 4;
2674 }
2675
2676 struct emit_ops amd64_emit_ops =
2677 {
2678 amd64_emit_prologue,
2679 amd64_emit_epilogue,
2680 amd64_emit_add,
2681 amd64_emit_sub,
2682 amd64_emit_mul,
2683 amd64_emit_lsh,
2684 amd64_emit_rsh_signed,
2685 amd64_emit_rsh_unsigned,
2686 amd64_emit_ext,
2687 amd64_emit_log_not,
2688 amd64_emit_bit_and,
2689 amd64_emit_bit_or,
2690 amd64_emit_bit_xor,
2691 amd64_emit_bit_not,
2692 amd64_emit_equal,
2693 amd64_emit_less_signed,
2694 amd64_emit_less_unsigned,
2695 amd64_emit_ref,
2696 amd64_emit_if_goto,
2697 amd64_emit_goto,
2698 amd64_write_goto_address,
2699 amd64_emit_const,
2700 amd64_emit_call,
2701 amd64_emit_reg,
2702 amd64_emit_pop,
2703 amd64_emit_stack_flush,
2704 amd64_emit_zero_ext,
2705 amd64_emit_swap,
2706 amd64_emit_stack_adjust,
2707 amd64_emit_int_call_1,
2708 amd64_emit_void_call_2,
2709 amd64_emit_eq_goto,
2710 amd64_emit_ne_goto,
2711 amd64_emit_lt_goto,
2712 amd64_emit_le_goto,
2713 amd64_emit_gt_goto,
2714 amd64_emit_ge_goto
2715 };
2716
2717 #endif /* __x86_64__ */
2718
2719 static void
2720 i386_emit_prologue (void)
2721 {
2722 EMIT_ASM32 (i386_prologue,
2723 "push %ebp\n\t"
2724 "mov %esp,%ebp\n\t"
2725 "push %ebx");
2726 /* At this point, the raw regs base address is at 8(%ebp), and the
2727 value pointer is at 12(%ebp). */
2728 }
2729
2730 static void
2731 i386_emit_epilogue (void)
2732 {
2733 EMIT_ASM32 (i386_epilogue,
2734 "mov 12(%ebp),%ecx\n\t"
2735 "mov %eax,(%ecx)\n\t"
2736 "mov %ebx,0x4(%ecx)\n\t"
2737 "xor %eax,%eax\n\t"
2738 "pop %ebx\n\t"
2739 "pop %ebp\n\t"
2740 "ret");
2741 }
2742
2743 static void
2744 i386_emit_add (void)
2745 {
2746 EMIT_ASM32 (i386_add,
2747 "add (%esp),%eax\n\t"
2748 "adc 0x4(%esp),%ebx\n\t"
2749 "lea 0x8(%esp),%esp");
2750 }
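
/* On i386 the 64-bit bytecode values live split across the %ebx:%eax
   pair (high:low, as the epilogue above shows), so 64-bit arithmetic
   is done in double precision: the add above pairs add with adc to
   carry into the high half.  */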
2751
2752 static void
2753 i386_emit_sub (void)
2754 {
2755 EMIT_ASM32 (i386_sub,
2756 "subl %eax,(%esp)\n\t"
2757 "sbbl %ebx,4(%esp)\n\t"
2758 "pop %eax\n\t"
2759 "pop %ebx\n\t");
2760 }
2761
2762 static void
2763 i386_emit_mul (void)
2764 {
2765 emit_error = 1;
2766 }
2767
2768 static void
2769 i386_emit_lsh (void)
2770 {
2771 emit_error = 1;
2772 }
2773
2774 static void
2775 i386_emit_rsh_signed (void)
2776 {
2777 emit_error = 1;
2778 }
2779
2780 static void
2781 i386_emit_rsh_unsigned (void)
2782 {
2783 emit_error = 1;
2784 }
2785
2786 static void
2787 i386_emit_ext (int arg)
2788 {
2789 switch (arg)
2790 {
2791 case 8:
2792 EMIT_ASM32 (i386_ext_8,
2793 "cbtw\n\t"
2794 "cwtl\n\t"
2795 "movl %eax,%ebx\n\t"
2796 "sarl $31,%ebx");
2797 break;
2798 case 16:
2799 EMIT_ASM32 (i386_ext_16,
2800 "cwtl\n\t"
2801 "movl %eax,%ebx\n\t"
2802 "sarl $31,%ebx");
2803 break;
2804 case 32:
2805 EMIT_ASM32 (i386_ext_32,
2806 "movl %eax,%ebx\n\t"
2807 "sarl $31,%ebx");
2808 break;
2809 default:
2810 emit_error = 1;
2811 }
2812 }
2813
2814 static void
2815 i386_emit_log_not (void)
2816 {
2817 EMIT_ASM32 (i386_log_not,
2818 "or %ebx,%eax\n\t"
2819 "test %eax,%eax\n\t"
2820 "sete %cl\n\t"
2821 "xor %ebx,%ebx\n\t"
2822 "movzbl %cl,%eax");
2823 }
2824
2825 static void
2826 i386_emit_bit_and (void)
2827 {
2828 EMIT_ASM32 (i386_and,
2829 "and (%esp),%eax\n\t"
2830 "and 0x4(%esp),%ebx\n\t"
2831 "lea 0x8(%esp),%esp");
2832 }
2833
2834 static void
2835 i386_emit_bit_or (void)
2836 {
2837 EMIT_ASM32 (i386_or,
2838 "or (%esp),%eax\n\t"
2839 "or 0x4(%esp),%ebx\n\t"
2840 "lea 0x8(%esp),%esp");
2841 }
2842
2843 static void
2844 i386_emit_bit_xor (void)
2845 {
2846 EMIT_ASM32 (i386_xor,
2847 "xor (%esp),%eax\n\t"
2848 "xor 0x4(%esp),%ebx\n\t"
2849 "lea 0x8(%esp),%esp");
2850 }
2851
2852 static void
2853 i386_emit_bit_not (void)
2854 {
2855 EMIT_ASM32 (i386_bit_not,
2856 "xor $0xffffffff,%eax\n\t"
2857 "xor $0xffffffff,%ebx\n\t");
2858 }
2859
2860 static void
2861 i386_emit_equal (void)
2862 {
2863 EMIT_ASM32 (i386_equal,
2864 "cmpl %ebx,4(%esp)\n\t"
2865 "jne .Li386_equal_false\n\t"
2866 "cmpl %eax,(%esp)\n\t"
2867 "je .Li386_equal_true\n\t"
2868 ".Li386_equal_false:\n\t"
2869 "xor %eax,%eax\n\t"
2870 "jmp .Li386_equal_end\n\t"
2871 ".Li386_equal_true:\n\t"
2872 "mov $1,%eax\n\t"
2873 ".Li386_equal_end:\n\t"
2874 "xor %ebx,%ebx\n\t"
2875 "lea 0x8(%esp),%esp");
2876 }
2877
2878 static void
2879 i386_emit_less_signed (void)
2880 {
2881 EMIT_ASM32 (i386_less_signed,
2882 "cmpl %ebx,4(%esp)\n\t"
2883 "jl .Li386_less_signed_true\n\t"
2884 "jne .Li386_less_signed_false\n\t"
2885 "cmpl %eax,(%esp)\n\t"
2886 "jl .Li386_less_signed_true\n\t"
2887 ".Li386_less_signed_false:\n\t"
2888 "xor %eax,%eax\n\t"
2889 "jmp .Li386_less_signed_end\n\t"
2890 ".Li386_less_signed_true:\n\t"
2891 "mov $1,%eax\n\t"
2892 ".Li386_less_signed_end:\n\t"
2893 "xor %ebx,%ebx\n\t"
2894 "lea 0x8(%esp),%esp");
2895 }
2896
2897 static void
2898 i386_emit_less_unsigned (void)
2899 {
2900 EMIT_ASM32 (i386_less_unsigned,
2901 "cmpl %ebx,4(%esp)\n\t"
2902 "jb .Li386_less_unsigned_true\n\t"
2903 "jne .Li386_less_unsigned_false\n\t"
2904 "cmpl %eax,(%esp)\n\t"
2905 "jb .Li386_less_unsigned_true\n\t"
2906 ".Li386_less_unsigned_false:\n\t"
2907 "xor %eax,%eax\n\t"
2908 "jmp .Li386_less_unsigned_end\n\t"
2909 ".Li386_less_unsigned_true:\n\t"
2910 "mov $1,%eax\n\t"
2911 ".Li386_less_unsigned_end:\n\t"
2912 "xor %ebx,%ebx\n\t"
2913 "lea 0x8(%esp),%esp");
2914 }
2915
2916 static void
2917 i386_emit_ref (int size)
2918 {
2919 switch (size)
2920 {
2921 case 1:
2922 EMIT_ASM32 (i386_ref1,
2923 "movb (%eax),%al");
2924 break;
2925 case 2:
2926 EMIT_ASM32 (i386_ref2,
2927 "movw (%eax),%ax");
2928 break;
2929 case 4:
2930 EMIT_ASM32 (i386_ref4,
2931 "movl (%eax),%eax");
2932 break;
2933 case 8:
2934 EMIT_ASM32 (i386_ref8,
2935 "movl 4(%eax),%ebx\n\t"
2936 "movl (%eax),%eax");
2937 break;
2938 }
2939 }
2940
2941 static void
2942 i386_emit_if_goto (int *offset_p, int *size_p)
2943 {
2944 EMIT_ASM32 (i386_if_goto,
2945 "mov %eax,%ecx\n\t"
2946 "or %ebx,%ecx\n\t"
2947 "pop %eax\n\t"
2948 "pop %ebx\n\t"
2949 "cmpl $0,%ecx\n\t"
2950 /* Don't trust the assembler to choose the right jump */
2951 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2952
2953 if (offset_p)
2954 *offset_p = 11; /* be sure that this matches the sequence above */
2955 if (size_p)
2956 *size_p = 4;
2957 }
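
/* The 11 above, byte by byte: mov %eax,%ecx (89 c1, 2 bytes),
   or %ebx,%ecx (09 d9, 2), pop %eax (58, 1), pop %ebx (5b, 1),
   cmpl $0,%ecx (83 f9 00, 3), then the two jne opcode bytes 0f 85 --
   so the 4-byte displacement starts at offset 11.  */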
2958
2959 static void
2960 i386_emit_goto (int *offset_p, int *size_p)
2961 {
2962 EMIT_ASM32 (i386_goto,
2963 /* Don't trust the assembler to choose the right jump */
2964 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2965 if (offset_p)
2966 *offset_p = 1;
2967 if (size_p)
2968 *size_p = 4;
2969 }
2970
2971 static void
2972 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2973 {
2974 int diff = (to - (from + size));
2975 unsigned char buf[sizeof (int)];
2976
2977 /* We're only doing 4-byte sizes at the moment. */
2978 if (size != 4)
2979 {
2980 emit_error = 1;
2981 return;
2982 }
2983
2984 memcpy (buf, &diff, sizeof (int));
2985 write_inferior_memory (from, buf, sizeof (int));
2986 }
2987
2988 static void
2989 i386_emit_const (LONGEST num)
2990 {
2991 unsigned char buf[16];
2992 int i, hi, lo;
2993 CORE_ADDR buildaddr = current_insn_ptr;
2994
2995 i = 0;
2996 buf[i++] = 0xb8; /* mov $<n>,%eax */
2997 lo = num & 0xffffffff;
2998 memcpy (&buf[i], &lo, sizeof (lo));
2999 i += 4;
3000 hi = ((num >> 32) & 0xffffffff);
3001 if (hi)
3002 {
3003 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3004 memcpy (&buf[i], &hi, sizeof (hi));
3005 i += 4;
3006 }
3007 else
3008 {
3009 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3010 }
3011 append_insns (&buildaddr, i, buf);
3012 current_insn_ptr = buildaddr;
3013 }
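
/* For example, emitting the constant 0x100000002 produces
   b8 02 00 00 00 (mov $0x2,%eax) followed by bb 01 00 00 00
   (mov $0x1,%ebx): the low half of the value goes to %eax and the
   high half to %ebx, matching the pair convention above.  */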
3014
3015 static void
3016 i386_emit_call (CORE_ADDR fn)
3017 {
3018 unsigned char buf[16];
3019 int i, offset;
3020 CORE_ADDR buildaddr;
3021
3022 buildaddr = current_insn_ptr;
3023 i = 0;
3024 buf[i++] = 0xe8; /* call <reladdr> */
3025 offset = ((int) fn) - (buildaddr + 5);
3026 memcpy (buf + 1, &offset, 4);
3027 append_insns (&buildaddr, 5, buf);
3028 current_insn_ptr = buildaddr;
3029 }
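
/* Unlike amd64_emit_call above, a single e8 rel32 always suffices
   here: on i386 any displacement fits in 32 bits.  */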
3030
3031 static void
3032 i386_emit_reg (int reg)
3033 {
3034 unsigned char buf[16];
3035 int i;
3036 CORE_ADDR buildaddr;
3037
3038 EMIT_ASM32 (i386_reg_a,
3039 "sub $0x8,%esp");
3040 buildaddr = current_insn_ptr;
3041 i = 0;
3042 buf[i++] = 0xb8; /* mov $<n>,%eax */
3043 memcpy (&buf[i], &reg, sizeof (reg));
3044 i += 4;
3045 append_insns (&buildaddr, i, buf);
3046 current_insn_ptr = buildaddr;
3047 EMIT_ASM32 (i386_reg_b,
3048 "mov %eax,4(%esp)\n\t"
3049 "mov 8(%ebp),%eax\n\t"
3050 "mov %eax,(%esp)");
3051 i386_emit_call (get_raw_reg_func_addr ());
3052 EMIT_ASM32 (i386_reg_c,
3053 "xor %ebx,%ebx\n\t"
3054 "lea 0x8(%esp),%esp");
3055 }
3056
3057 static void
3058 i386_emit_pop (void)
3059 {
3060 EMIT_ASM32 (i386_pop,
3061 "pop %eax\n\t"
3062 "pop %ebx");
3063 }
3064
3065 static void
3066 i386_emit_stack_flush (void)
3067 {
3068 EMIT_ASM32 (i386_stack_flush,
3069 "push %ebx\n\t"
3070 "push %eax");
3071 }
3072
3073 static void
3074 i386_emit_zero_ext (int arg)
3075 {
3076 switch (arg)
3077 {
3078 case 8:
3079 EMIT_ASM32 (i386_zero_ext_8,
3080 "and $0xff,%eax\n\t"
3081 "xor %ebx,%ebx");
3082 break;
3083 case 16:
3084 EMIT_ASM32 (i386_zero_ext_16,
3085 "and $0xffff,%eax\n\t"
3086 "xor %ebx,%ebx");
3087 break;
3088 case 32:
3089 EMIT_ASM32 (i386_zero_ext_32,
3090 "xor %ebx,%ebx");
3091 break;
3092 default:
3093 emit_error = 1;
3094 }
3095 }
3096
3097 static void
3098 i386_emit_swap (void)
3099 {
3100 EMIT_ASM32 (i386_swap,
3101 "mov %eax,%ecx\n\t"
3102 "mov %ebx,%edx\n\t"
3103 "pop %eax\n\t"
3104 "pop %ebx\n\t"
3105 "push %edx\n\t"
3106 "push %ecx");
3107 }
3108
3109 static void
3110 i386_emit_stack_adjust (int n)
3111 {
3112 unsigned char buf[16];
3113 int i;
3114 CORE_ADDR buildaddr = current_insn_ptr;
3115
3116 i = 0;
3117 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3118 buf[i++] = 0x64;
3119 buf[i++] = 0x24;
3120 buf[i++] = n * 8;
3121 append_insns (&buildaddr, i, buf);
3122 current_insn_ptr = buildaddr;
3123 }
3124
3125 /* FN's prototype is `LONGEST(*fn)(int)'. */
3126
3127 static void
3128 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3129 {
3130 unsigned char buf[16];
3131 int i;
3132 CORE_ADDR buildaddr;
3133
3134 EMIT_ASM32 (i386_int_call_1_a,
3135 /* Reserve a bit of stack space. */
3136 "sub $0x8,%esp");
3137 /* Put the one argument on the stack. */
3138 buildaddr = current_insn_ptr;
3139 i = 0;
3140 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3141 buf[i++] = 0x04;
3142 buf[i++] = 0x24;
3143 memcpy (&buf[i], &arg1, sizeof (arg1));
3144 i += 4;
3145 append_insns (&buildaddr, i, buf);
3146 current_insn_ptr = buildaddr;
3147 i386_emit_call (fn);
3148 EMIT_ASM32 (i386_int_call_1_c,
3149 "mov %edx,%ebx\n\t"
3150 "lea 0x8(%esp),%esp");
3151 }
3152
3153 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3154
3155 static void
3156 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3157 {
3158 unsigned char buf[16];
3159 int i;
3160 CORE_ADDR buildaddr;
3161
3162 EMIT_ASM32 (i386_void_call_2_a,
3163 /* Preserve %eax only; we don't have to worry about %ebx. */
3164 "push %eax\n\t"
3165 /* Reserve a bit of stack space for arguments. */
3166 "sub $0x10,%esp\n\t"
3167 /* Copy "top" to the second argument position. (Note that
3168 		 we can't assume the function won't scribble on its
3169 arguments, so don't try to restore from this.) */
3170 "mov %eax,4(%esp)\n\t"
3171 "mov %ebx,8(%esp)");
3172 /* Put the first argument on the stack. */
3173 buildaddr = current_insn_ptr;
3174 i = 0;
3175 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3176 buf[i++] = 0x04;
3177 buf[i++] = 0x24;
3178 memcpy (&buf[i], &arg1, sizeof (arg1));
3179 i += 4;
3180 append_insns (&buildaddr, i, buf);
3181 current_insn_ptr = buildaddr;
3182 i386_emit_call (fn);
3183 EMIT_ASM32 (i386_void_call_2_b,
3184 "lea 0x10(%esp),%esp\n\t"
3185 /* Restore original stack top. */
3186 "pop %eax");
3187 }
3188
3189
3190 void
3191 i386_emit_eq_goto (int *offset_p, int *size_p)
3192 {
3193 EMIT_ASM32 (eq,
3194 	      /* Check low half first; it's more likely to be the decider.  */
3195 "cmpl %eax,(%esp)\n\t"
3196 "jne .Leq_fallthru\n\t"
3197 "cmpl %ebx,4(%esp)\n\t"
3198 "jne .Leq_fallthru\n\t"
3199 "lea 0x8(%esp),%esp\n\t"
3200 "pop %eax\n\t"
3201 "pop %ebx\n\t"
3202 /* jmp, but don't trust the assembler to choose the right jump */
3203 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3204 ".Leq_fallthru:\n\t"
3205 "lea 0x8(%esp),%esp\n\t"
3206 "pop %eax\n\t"
3207 "pop %ebx");
3208
3209 if (offset_p)
3210 *offset_p = 18;
3211 if (size_p)
3212 *size_p = 4;
3213 }
3214
3215 void
3216 i386_emit_ne_goto (int *offset_p, int *size_p)
3217 {
3218 EMIT_ASM32 (ne,
3219 	      /* Check low half first; it's more likely to be the decider.  */
3220 "cmpl %eax,(%esp)\n\t"
3221 "jne .Lne_jump\n\t"
3222 "cmpl %ebx,4(%esp)\n\t"
3223 "je .Lne_fallthru\n\t"
3224 ".Lne_jump:\n\t"
3225 "lea 0x8(%esp),%esp\n\t"
3226 "pop %eax\n\t"
3227 "pop %ebx\n\t"
3228 /* jmp, but don't trust the assembler to choose the right jump */
3229 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3230 ".Lne_fallthru:\n\t"
3231 "lea 0x8(%esp),%esp\n\t"
3232 "pop %eax\n\t"
3233 "pop %ebx");
3234
3235 if (offset_p)
3236 *offset_p = 18;
3237 if (size_p)
3238 *size_p = 4;
3239 }
3240
3241 void
3242 i386_emit_lt_goto (int *offset_p, int *size_p)
3243 {
3244 EMIT_ASM32 (lt,
3245 "cmpl %ebx,4(%esp)\n\t"
3246 "jl .Llt_jump\n\t"
3247 "jne .Llt_fallthru\n\t"
3248 "cmpl %eax,(%esp)\n\t"
3249 "jnl .Llt_fallthru\n\t"
3250 ".Llt_jump:\n\t"
3251 "lea 0x8(%esp),%esp\n\t"
3252 "pop %eax\n\t"
3253 "pop %ebx\n\t"
3254 /* jmp, but don't trust the assembler to choose the right jump */
3255 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3256 ".Llt_fallthru:\n\t"
3257 "lea 0x8(%esp),%esp\n\t"
3258 "pop %eax\n\t"
3259 "pop %ebx");
3260
3261 if (offset_p)
3262 *offset_p = 20;
3263 if (size_p)
3264 *size_p = 4;
3265 }
3266
3267 void
3268 i386_emit_le_goto (int *offset_p, int *size_p)
3269 {
3270 EMIT_ASM32 (le,
3271 "cmpl %ebx,4(%esp)\n\t"
3272 "jle .Lle_jump\n\t"
3273 "jne .Lle_fallthru\n\t"
3274 "cmpl %eax,(%esp)\n\t"
3275 "jnle .Lle_fallthru\n\t"
3276 ".Lle_jump:\n\t"
3277 "lea 0x8(%esp),%esp\n\t"
3278 "pop %eax\n\t"
3279 "pop %ebx\n\t"
3280 /* jmp, but don't trust the assembler to choose the right jump */
3281 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3282 ".Lle_fallthru:\n\t"
3283 "lea 0x8(%esp),%esp\n\t"
3284 "pop %eax\n\t"
3285 "pop %ebx");
3286
3287 if (offset_p)
3288 *offset_p = 20;
3289 if (size_p)
3290 *size_p = 4;
3291 }
3292
3293 void
3294 i386_emit_gt_goto (int *offset_p, int *size_p)
3295 {
3296 EMIT_ASM32 (gt,
3297 "cmpl %ebx,4(%esp)\n\t"
3298 "jg .Lgt_jump\n\t"
3299 "jne .Lgt_fallthru\n\t"
3300 "cmpl %eax,(%esp)\n\t"
3301 "jng .Lgt_fallthru\n\t"
3302 ".Lgt_jump:\n\t"
3303 "lea 0x8(%esp),%esp\n\t"
3304 "pop %eax\n\t"
3305 "pop %ebx\n\t"
3306 /* jmp, but don't trust the assembler to choose the right jump */
3307 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3308 ".Lgt_fallthru:\n\t"
3309 "lea 0x8(%esp),%esp\n\t"
3310 "pop %eax\n\t"
3311 "pop %ebx");
3312
3313 if (offset_p)
3314 *offset_p = 20;
3315 if (size_p)
3316 *size_p = 4;
3317 }
3318
3319 void
3320 i386_emit_ge_goto (int *offset_p, int *size_p)
3321 {
3322 EMIT_ASM32 (ge,
3323 "cmpl %ebx,4(%esp)\n\t"
3324 "jge .Lge_jump\n\t"
3325 "jne .Lge_fallthru\n\t"
3326 "cmpl %eax,(%esp)\n\t"
3327 "jnge .Lge_fallthru\n\t"
3328 ".Lge_jump:\n\t"
3329 "lea 0x8(%esp),%esp\n\t"
3330 "pop %eax\n\t"
3331 "pop %ebx\n\t"
3332 /* jmp, but don't trust the assembler to choose the right jump */
3333 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3334 ".Lge_fallthru:\n\t"
3335 "lea 0x8(%esp),%esp\n\t"
3336 "pop %eax\n\t"
3337 "pop %ebx");
3338
3339 if (offset_p)
3340 *offset_p = 20;
3341 if (size_p)
3342 *size_p = 4;
3343 }
3344
3345 struct emit_ops i386_emit_ops =
3346 {
3347 i386_emit_prologue,
3348 i386_emit_epilogue,
3349 i386_emit_add,
3350 i386_emit_sub,
3351 i386_emit_mul,
3352 i386_emit_lsh,
3353 i386_emit_rsh_signed,
3354 i386_emit_rsh_unsigned,
3355 i386_emit_ext,
3356 i386_emit_log_not,
3357 i386_emit_bit_and,
3358 i386_emit_bit_or,
3359 i386_emit_bit_xor,
3360 i386_emit_bit_not,
3361 i386_emit_equal,
3362 i386_emit_less_signed,
3363 i386_emit_less_unsigned,
3364 i386_emit_ref,
3365 i386_emit_if_goto,
3366 i386_emit_goto,
3367 i386_write_goto_address,
3368 i386_emit_const,
3369 i386_emit_call,
3370 i386_emit_reg,
3371 i386_emit_pop,
3372 i386_emit_stack_flush,
3373 i386_emit_zero_ext,
3374 i386_emit_swap,
3375 i386_emit_stack_adjust,
3376 i386_emit_int_call_1,
3377 i386_emit_void_call_2,
3378 i386_emit_eq_goto,
3379 i386_emit_ne_goto,
3380 i386_emit_lt_goto,
3381 i386_emit_le_goto,
3382 i386_emit_gt_goto,
3383 i386_emit_ge_goto
3384 };
3385
3386
3387 static struct emit_ops *
3388 x86_emit_ops (void)
3389 {
3390 #ifdef __x86_64__
3391 if (is_64bit_tdesc ())
3392 return &amd64_emit_ops;
3393 else
3394 #endif
3395 return &i386_emit_ops;
3396 }
3397
3398 static int
3399 x86_supports_range_stepping (void)
3400 {
3401 return 1;
3402 }
3403
3404 /* This is initialized assuming an amd64 target.
3405 x86_arch_setup will correct it for i386 or amd64 targets. */
3406
3407 struct linux_target_ops the_low_target =
3408 {
3409 x86_arch_setup,
3410 x86_linux_regs_info,
3411 x86_cannot_fetch_register,
3412 x86_cannot_store_register,
3413 NULL, /* fetch_register */
3414 x86_get_pc,
3415 x86_set_pc,
3416 x86_breakpoint,
3417 x86_breakpoint_len,
3418 NULL,
3419 1,
3420 x86_breakpoint_at,
3421 x86_supports_z_point_type,
3422 x86_insert_point,
3423 x86_remove_point,
3424 x86_stopped_by_watchpoint,
3425 x86_stopped_data_address,
3426 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3427 native i386 case (no registers smaller than an xfer unit), and are not
3428 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3429 NULL,
3430 NULL,
3431 /* need to fix up i386 siginfo if host is amd64 */
3432 x86_siginfo_fixup,
3433 x86_linux_new_process,
3434 x86_linux_new_thread,
3435 x86_linux_prepare_to_resume,
3436 x86_linux_process_qsupported,
3437 x86_supports_tracepoints,
3438 x86_get_thread_area,
3439 x86_install_fast_tracepoint_jump_pad,
3440 x86_emit_ops,
3441 x86_get_min_fast_tracepoint_insn_len,
3442 x86_supports_range_stepping,
3443 };
3444
3445 void
3446 initialize_low_arch (void)
3447 {
3448 /* Initialize the Linux target descriptions. */
3449 #ifdef __x86_64__
3450 init_registers_amd64_linux ();
3451 init_registers_amd64_avx_linux ();
3452 init_registers_amd64_avx512_linux ();
3453 init_registers_amd64_mpx_linux ();
3454
3455 init_registers_x32_linux ();
3456 init_registers_x32_avx_linux ();
3457 init_registers_x32_avx512_linux ();
3458
3459 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3460 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3461 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3462 #endif
3463 init_registers_i386_linux ();
3464 init_registers_i386_mmx_linux ();
3465 init_registers_i386_avx_linux ();
3466 init_registers_i386_avx512_linux ();
3467 init_registers_i386_mpx_linux ();
3468
3469 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3470 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3471 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3472
3473 initialize_regsets_info (&x86_regsets_info);
3474 }