Add iterate_over_lwps to gdbserver
gdb/gdbserver/linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

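/* Templates for the jump instructions used to wire in jump pads: a
   5-byte `jmp rel32' (0xe9) and a 4-byte `jmp rel16' (0x66 0xe9, with
   the operand-size override prefix).  The zeroed displacement bytes
   are patched in when the jump is installed.  */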
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
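/* The single byte 0xCC is the x86 `int3' software breakpoint
   instruction.  */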
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f

/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
          + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}


/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp,
     we just mark that the registers need updating.  */
  lwp->arch_private->debug_registers_changed = 1;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp->stopped)
    linux_stop_lwp (lwp);

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

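  /* The actual debug register write is deferred until each LWP is
     next resumed, in x86_linux_prepare_to_resume; the callback only
     flags the LWPs of this process (stopping any that are running).  */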
  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (current_lwp_ptid (), regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

static unsigned long
x86_dr_low_get_status (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  return info;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      struct x86_debug_reg_state *state
        = x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, x86_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */
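/* For example, for a SIGSEGV the native 64-bit siginfo records si_addr
   as an 8-byte pointer, while the compat layout below records it as a
   4-byte `unsigned int' (_sifields._sigfault._addr), so the two
   layouts cannot simply be memcpy'd; the conversion routines below
   copy field by field instead.  */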

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..511] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
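/* As a worked example, with the 8-byte XCR0 value stored at byte
   offset 464, pulling it out of an XSAVE buffer viewed as an array of
   uint64_t words is just:

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   i.e. word 58, which is what x86_linux_read_description below does.  */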

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
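/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 (the encoding of `mov %rsp,%rsi') into BUF and
   returns 3.  */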

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386; if we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}
2039
2040 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2041 architectures. */
2042
2043 static int
2044 x86_get_min_fast_tracepoint_insn_len (void)
2045 {
2046 static int warned_about_fast_tracepoints = 0;
2047
2048 #ifdef __x86_64__
2049 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2050 used for fast tracepoints. */
2051 if (is_64bit_tdesc ())
2052 return 5;
2053 #endif
2054
2055 if (agent_loaded_p ())
2056 {
2057 char errbuf[IPA_BUFSIZ];
2058
2059 errbuf[0] = '\0';
2060
2061 /* On x86, if trampolines are available, then 4-byte jump instructions
2062 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2063 with a 4-byte offset are used instead. */
2064 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2065 return 4;
2066 else
2067 {
2068 	      /* GDB has no channel to explain to the user why a shorter fast
2069 		 tracepoint is not possible, but at least make GDBserver
2070 		 mention that something has gone awry.  */
2071 	      if (!warned_about_fast_tracepoints)
2072 		{
2073 		  warning ("4-byte fast tracepoints not available; %s", errbuf);
2074 warned_about_fast_tracepoints = 1;
2075 }
2076 return 5;
2077 }
2078 }
2079 else
2080 {
2081 /* Indicate that the minimum length is currently unknown since the IPA
2082 has not loaded yet. */
2083 return 0;
2084 }
2085 }
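
/* For reference: the 5-byte form above is the ordinary e9 rel32 jump.
   The 4-byte form is presumably the operand-size-prefixed 66 e9 rel16
   jump, which in 32-bit mode truncates %eip to 16 bits, so it can only
   reach a trampoline located in the lowest 64 KB of the address space;
   that placement constraint is why trampoline space may be
   unavailable.  */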
2086
2087 static void
2088 add_insns (unsigned char *start, int len)
2089 {
2090 CORE_ADDR buildaddr = current_insn_ptr;
2091
2092 if (debug_threads)
2093 debug_printf ("Adding %d bytes of insn at %s\n",
2094 len, paddress (buildaddr));
2095
2096 append_insns (&buildaddr, len, start);
2097 current_insn_ptr = buildaddr;
2098 }
2099
2100 /* Our general strategy for emitting code is to avoid specifying raw
2101 bytes whenever possible, and instead copy a block of inline asm
2102 that is embedded in the function. This is a little messy, because
2103 we need to keep the compiler from discarding what looks like dead
2104 code, plus suppress various warnings. */
2105
2106 #define EMIT_ASM(NAME, INSNS) \
2107 do \
2108 { \
2109 extern unsigned char start_ ## NAME, end_ ## NAME; \
2110 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2111 __asm__ ("jmp end_" #NAME "\n" \
2112 "\t" "start_" #NAME ":" \
2113 "\t" INSNS "\n" \
2114 "\t" "end_" #NAME ":"); \
2115 } while (0)
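
/* For illustration, a call such as EMIT_ASM (amd64_pop, "pop %rax")
   expands roughly to:

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\t" "start_amd64_pop:" "\t" "pop %rax\n"
	      "\t" "end_amd64_pop:");

   The instructions are assembled into the body of the emitting
   function (the jmp keeps them from ever executing there), and the
   bytes between the two labels are copied into the jump pad.  */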
2116
2117 #ifdef __x86_64__
2118
2119 #define EMIT_ASM32(NAME,INSNS) \
2120 do \
2121 { \
2122 extern unsigned char start_ ## NAME, end_ ## NAME; \
2123 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2124 __asm__ (".code32\n" \
2125 "\t" "jmp end_" #NAME "\n" \
2126 "\t" "start_" #NAME ":\n" \
2127 "\t" INSNS "\n" \
2128 "\t" "end_" #NAME ":\n" \
2129 ".code64\n"); \
2130 } while (0)
2131
2132 #else
2133
2134 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2135
2136 #endif
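
/* A note on EMIT_ASM32, inferred from the .code32/.code64 directives
   above: on __x86_64__ the instruction text is assembled as 32-bit
   code even though it lives inside the 64-bit gdbserver binary.  The
   bytes are never executed in place; they are only copied into the
   jump pad of a 32-bit inferior, where they decode correctly.  */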
2137
2138 #ifdef __x86_64__
2139
2140 static void
2141 amd64_emit_prologue (void)
2142 {
2143 EMIT_ASM (amd64_prologue,
2144 "pushq %rbp\n\t"
2145 "movq %rsp,%rbp\n\t"
2146 "sub $0x20,%rsp\n\t"
2147 "movq %rdi,-8(%rbp)\n\t"
2148 "movq %rsi,-16(%rbp)");
2149 }
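
/* A convention runs through the amd64 emitters below (inferred from
   the prologue and epilogue): the top of the agent-expression stack is
   cached in %rax and deeper entries are pushed on the machine stack;
   the prologue stashes the incoming raw-register block (%rdi) at
   -8(%rbp) and the result pointer (%rsi) at -16(%rbp), which the
   epilogue uses to store the final %rax.  */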
2150
2151
2152 static void
2153 amd64_emit_epilogue (void)
2154 {
2155 EMIT_ASM (amd64_epilogue,
2156 "movq -16(%rbp),%rdi\n\t"
2157 "movq %rax,(%rdi)\n\t"
2158 "xor %rax,%rax\n\t"
2159 "leave\n\t"
2160 "ret");
2161 }
2162
2163 static void
2164 amd64_emit_add (void)
2165 {
2166 EMIT_ASM (amd64_add,
2167 "add (%rsp),%rax\n\t"
2168 "lea 0x8(%rsp),%rsp");
2169 }
2170
2171 static void
2172 amd64_emit_sub (void)
2173 {
2174 EMIT_ASM (amd64_sub,
2175 "sub %rax,(%rsp)\n\t"
2176 "pop %rax");
2177 }
2178
2179 static void
2180 amd64_emit_mul (void)
2181 {
2182 emit_error = 1;
2183 }
2184
2185 static void
2186 amd64_emit_lsh (void)
2187 {
2188 emit_error = 1;
2189 }
2190
2191 static void
2192 amd64_emit_rsh_signed (void)
2193 {
2194 emit_error = 1;
2195 }
2196
2197 static void
2198 amd64_emit_rsh_unsigned (void)
2199 {
2200 emit_error = 1;
2201 }
2202
2203 static void
2204 amd64_emit_ext (int arg)
2205 {
2206 switch (arg)
2207 {
2208 case 8:
2209 EMIT_ASM (amd64_ext_8,
2210 "cbtw\n\t"
2211 "cwtl\n\t"
2212 "cltq");
2213 break;
2214 case 16:
2215 EMIT_ASM (amd64_ext_16,
2216 "cwtl\n\t"
2217 "cltq");
2218 break;
2219 case 32:
2220 EMIT_ASM (amd64_ext_32,
2221 "cltq");
2222 break;
2223 default:
2224 emit_error = 1;
2225 }
2226 }
2227
2228 static void
2229 amd64_emit_log_not (void)
2230 {
2231 EMIT_ASM (amd64_log_not,
2232 "test %rax,%rax\n\t"
2233 "sete %cl\n\t"
2234 "movzbq %cl,%rax");
2235 }
2236
2237 static void
2238 amd64_emit_bit_and (void)
2239 {
2240 EMIT_ASM (amd64_and,
2241 "and (%rsp),%rax\n\t"
2242 "lea 0x8(%rsp),%rsp");
2243 }
2244
2245 static void
2246 amd64_emit_bit_or (void)
2247 {
2248 EMIT_ASM (amd64_or,
2249 "or (%rsp),%rax\n\t"
2250 "lea 0x8(%rsp),%rsp");
2251 }
2252
2253 static void
2254 amd64_emit_bit_xor (void)
2255 {
2256 EMIT_ASM (amd64_xor,
2257 "xor (%rsp),%rax\n\t"
2258 "lea 0x8(%rsp),%rsp");
2259 }
2260
2261 static void
2262 amd64_emit_bit_not (void)
2263 {
2264 EMIT_ASM (amd64_bit_not,
2265 "xorq $0xffffffffffffffff,%rax");
2266 }
2267
2268 static void
2269 amd64_emit_equal (void)
2270 {
2271 EMIT_ASM (amd64_equal,
2272 "cmp %rax,(%rsp)\n\t"
2273 "je .Lamd64_equal_true\n\t"
2274 "xor %rax,%rax\n\t"
2275 "jmp .Lamd64_equal_end\n\t"
2276 ".Lamd64_equal_true:\n\t"
2277 "mov $0x1,%rax\n\t"
2278 ".Lamd64_equal_end:\n\t"
2279 "lea 0x8(%rsp),%rsp");
2280 }
2281
2282 static void
2283 amd64_emit_less_signed (void)
2284 {
2285 EMIT_ASM (amd64_less_signed,
2286 "cmp %rax,(%rsp)\n\t"
2287 "jl .Lamd64_less_signed_true\n\t"
2288 "xor %rax,%rax\n\t"
2289 "jmp .Lamd64_less_signed_end\n\t"
2290 ".Lamd64_less_signed_true:\n\t"
2291 "mov $1,%rax\n\t"
2292 ".Lamd64_less_signed_end:\n\t"
2293 "lea 0x8(%rsp),%rsp");
2294 }
2295
2296 static void
2297 amd64_emit_less_unsigned (void)
2298 {
2299 EMIT_ASM (amd64_less_unsigned,
2300 "cmp %rax,(%rsp)\n\t"
2301 "jb .Lamd64_less_unsigned_true\n\t"
2302 "xor %rax,%rax\n\t"
2303 "jmp .Lamd64_less_unsigned_end\n\t"
2304 ".Lamd64_less_unsigned_true:\n\t"
2305 "mov $1,%rax\n\t"
2306 ".Lamd64_less_unsigned_end:\n\t"
2307 "lea 0x8(%rsp),%rsp");
2308 }
2309
2310 static void
2311 amd64_emit_ref (int size)
2312 {
2313 switch (size)
2314 {
2315 case 1:
2316 EMIT_ASM (amd64_ref1,
2317 "movb (%rax),%al");
2318 break;
2319 case 2:
2320 EMIT_ASM (amd64_ref2,
2321 "movw (%rax),%ax");
2322 break;
2323 case 4:
2324 EMIT_ASM (amd64_ref4,
2325 "movl (%rax),%eax");
2326 break;
2327 case 8:
2328 EMIT_ASM (amd64_ref8,
2329 "movq (%rax),%rax");
2330 break;
2331 }
2332 }
2333
2334 static void
2335 amd64_emit_if_goto (int *offset_p, int *size_p)
2336 {
2337 EMIT_ASM (amd64_if_goto,
2338 "mov %rax,%rcx\n\t"
2339 "pop %rax\n\t"
2340 "cmp $0,%rcx\n\t"
2341 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2342 if (offset_p)
2343 *offset_p = 10;
2344 if (size_p)
2345 *size_p = 4;
2346 }
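
/* Decoding the sequence above explains the magic numbers: mov
   %rax,%rcx is 3 bytes, pop %rax is 1, cmp $0,%rcx is 4, and the
   0x0f 0x85 (jne rel32) opcode is 2, so the 4-byte relocation field
   begins at byte 10.  The generic bytecode compiler patches it later
   through amd64_write_goto_address.  */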
2347
2348 static void
2349 amd64_emit_goto (int *offset_p, int *size_p)
2350 {
2351 EMIT_ASM (amd64_goto,
2352 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2353 if (offset_p)
2354 *offset_p = 1;
2355 if (size_p)
2356 *size_p = 4;
2357 }
2358
2359 static void
2360 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2361 {
2362 int diff = (to - (from + size));
2363 unsigned char buf[sizeof (int)];
2364
2365 if (size != 4)
2366 {
2367 emit_error = 1;
2368 return;
2369 }
2370
2371 memcpy (buf, &diff, sizeof (int));
2372 write_inferior_memory (from, buf, sizeof (int));
2373 }
2374
2375 static void
2376 amd64_emit_const (LONGEST num)
2377 {
2378 unsigned char buf[16];
2379 int i;
2380 CORE_ADDR buildaddr = current_insn_ptr;
2381
2382 i = 0;
2383 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2384 memcpy (&buf[i], &num, sizeof (num));
2385 i += 8;
2386 append_insns (&buildaddr, i, buf);
2387 current_insn_ptr = buildaddr;
2388 }
2389
2390 static void
2391 amd64_emit_call (CORE_ADDR fn)
2392 {
2393 unsigned char buf[16];
2394 int i;
2395 CORE_ADDR buildaddr;
2396 LONGEST offset64;
2397
2398   /* The destination function, being in a shared library, may be more
2399      than 31 bits away from the compiled code pad.  */
2400
2401 buildaddr = current_insn_ptr;
2402
2403 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2404
2405 i = 0;
2406
2407 if (offset64 > INT_MAX || offset64 < INT_MIN)
2408 {
2409       /* Offset is too large for a direct call, so call indirectly
2410 	 through a register.  %rdx is call-clobbered, so we don't have
2411 	 to push/pop it around the call.  */
2412       buf[i++] = 0x48; /* movabs $fn,%rdx */
2413       buf[i++] = 0xba;
2414       memcpy (buf + i, &fn, 8);
2415       i += 8;
2416       buf[i++] = 0xff; /* callq *%rdx */
2417       buf[i++] = 0xd2;
2418 }
2419 else
2420 {
2421       int offset32 = offset64; /* we know we can't overflow here. */
           buf[i++] = 0xe8; /* call <reladdr>; the opcode byte accounted
			       for in the offset64 computation above */
2422       memcpy (buf + i, &offset32, 4);
2423       i += 4;
2424 }
2425
2426 append_insns (&buildaddr, i, buf);
2427 current_insn_ptr = buildaddr;
2428 }
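
/* A worked example of the near-call case, with hypothetical addresses:
   if current_insn_ptr is 0x7f0000001000 and fn is 0x7f0000002000, then
   offset64 is 0xffb (fn minus the end of the 5-byte call) and the
   bytes e8 fb 0f 00 00 are appended.  */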
2429
2430 static void
2431 amd64_emit_reg (int reg)
2432 {
2433 unsigned char buf[16];
2434 int i;
2435 CORE_ADDR buildaddr;
2436
2437 /* Assume raw_regs is still in %rdi. */
2438 buildaddr = current_insn_ptr;
2439 i = 0;
2440 buf[i++] = 0xbe; /* mov $<n>,%esi */
2441 memcpy (&buf[i], &reg, sizeof (reg));
2442 i += 4;
2443 append_insns (&buildaddr, i, buf);
2444 current_insn_ptr = buildaddr;
2445 amd64_emit_call (get_raw_reg_func_addr ());
2446 }
2447
2448 static void
2449 amd64_emit_pop (void)
2450 {
2451 EMIT_ASM (amd64_pop,
2452 "pop %rax");
2453 }
2454
2455 static void
2456 amd64_emit_stack_flush (void)
2457 {
2458 EMIT_ASM (amd64_stack_flush,
2459 "push %rax");
2460 }
2461
2462 static void
2463 amd64_emit_zero_ext (int arg)
2464 {
2465 switch (arg)
2466 {
2467 case 8:
2468 EMIT_ASM (amd64_zero_ext_8,
2469 "and $0xff,%rax");
2470 break;
2471 case 16:
2472 EMIT_ASM (amd64_zero_ext_16,
2473 "and $0xffff,%rax");
2474 break;
2475 case 32:
2476 EMIT_ASM (amd64_zero_ext_32,
2477 "mov $0xffffffff,%rcx\n\t"
2478 "and %rcx,%rax");
2479 break;
2480 default:
2481 emit_error = 1;
2482 }
2483 }
2484
2485 static void
2486 amd64_emit_swap (void)
2487 {
2488 EMIT_ASM (amd64_swap,
2489 "mov %rax,%rcx\n\t"
2490 "pop %rax\n\t"
2491 "push %rcx");
2492 }
2493
2494 static void
2495 amd64_emit_stack_adjust (int n)
2496 {
2497 unsigned char buf[16];
2498 int i;
2499 CORE_ADDR buildaddr = current_insn_ptr;
2500
2501 i = 0;
2502 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2503 buf[i++] = 0x8d;
2504 buf[i++] = 0x64;
2505 buf[i++] = 0x24;
2506 /* This only handles adjustments up to 16, but we don't expect any more. */
2507 buf[i++] = n * 8;
2508 append_insns (&buildaddr, i, buf);
2509 current_insn_ptr = buildaddr;
2510 }
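
/* The bytes above encode lea disp8(%rsp),%rsp (48 8d 64 24 XX), so N
   counts 8-byte stack slots and N*8 must fit in a signed byte.  The
   i386 variant later in this file uses the same encoding minus the
   REX prefix.  */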
2511
2512 /* FN's prototype is `LONGEST(*fn)(int)'. */
2513
2514 static void
2515 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2516 {
2517 unsigned char buf[16];
2518 int i;
2519 CORE_ADDR buildaddr;
2520
2521 buildaddr = current_insn_ptr;
2522 i = 0;
2523 buf[i++] = 0xbf; /* movl $<n>,%edi */
2524 memcpy (&buf[i], &arg1, sizeof (arg1));
2525 i += 4;
2526 append_insns (&buildaddr, i, buf);
2527 current_insn_ptr = buildaddr;
2528 amd64_emit_call (fn);
2529 }
2530
2531 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2532
2533 static void
2534 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2535 {
2536 unsigned char buf[16];
2537 int i;
2538 CORE_ADDR buildaddr;
2539
2540 buildaddr = current_insn_ptr;
2541 i = 0;
2542 buf[i++] = 0xbf; /* movl $<n>,%edi */
2543 memcpy (&buf[i], &arg1, sizeof (arg1));
2544 i += 4;
2545 append_insns (&buildaddr, i, buf);
2546 current_insn_ptr = buildaddr;
2547 EMIT_ASM (amd64_void_call_2_a,
2548 /* Save away a copy of the stack top. */
2549 "push %rax\n\t"
2550 /* Also pass top as the second argument. */
2551 "mov %rax,%rsi");
2552 amd64_emit_call (fn);
2553 EMIT_ASM (amd64_void_call_2_b,
2554 	    /* Restore the stack top; %rax may have been trashed.  */
2555 "pop %rax");
2556 }
2557
2558 void
2559 amd64_emit_eq_goto (int *offset_p, int *size_p)
2560 {
2561 EMIT_ASM (amd64_eq,
2562 "cmp %rax,(%rsp)\n\t"
2563 "jne .Lamd64_eq_fallthru\n\t"
2564 "lea 0x8(%rsp),%rsp\n\t"
2565 "pop %rax\n\t"
2566 /* jmp, but don't trust the assembler to choose the right jump */
2567 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2568 ".Lamd64_eq_fallthru:\n\t"
2569 "lea 0x8(%rsp),%rsp\n\t"
2570 "pop %rax");
2571
2572 if (offset_p)
2573 *offset_p = 13;
2574 if (size_p)
2575 *size_p = 4;
2576 }
2577
2578 void
2579 amd64_emit_ne_goto (int *offset_p, int *size_p)
2580 {
2581 EMIT_ASM (amd64_ne,
2582 "cmp %rax,(%rsp)\n\t"
2583 "je .Lamd64_ne_fallthru\n\t"
2584 "lea 0x8(%rsp),%rsp\n\t"
2585 "pop %rax\n\t"
2586 /* jmp, but don't trust the assembler to choose the right jump */
2587 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2588 ".Lamd64_ne_fallthru:\n\t"
2589 "lea 0x8(%rsp),%rsp\n\t"
2590 "pop %rax");
2591
2592 if (offset_p)
2593 *offset_p = 13;
2594 if (size_p)
2595 *size_p = 4;
2596 }
2597
2598 void
2599 amd64_emit_lt_goto (int *offset_p, int *size_p)
2600 {
2601 EMIT_ASM (amd64_lt,
2602 "cmp %rax,(%rsp)\n\t"
2603 "jnl .Lamd64_lt_fallthru\n\t"
2604 "lea 0x8(%rsp),%rsp\n\t"
2605 "pop %rax\n\t"
2606 /* jmp, but don't trust the assembler to choose the right jump */
2607 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2608 ".Lamd64_lt_fallthru:\n\t"
2609 "lea 0x8(%rsp),%rsp\n\t"
2610 "pop %rax");
2611
2612 if (offset_p)
2613 *offset_p = 13;
2614 if (size_p)
2615 *size_p = 4;
2616 }
2617
2618 void
2619 amd64_emit_le_goto (int *offset_p, int *size_p)
2620 {
2621 EMIT_ASM (amd64_le,
2622 "cmp %rax,(%rsp)\n\t"
2623 "jnle .Lamd64_le_fallthru\n\t"
2624 "lea 0x8(%rsp),%rsp\n\t"
2625 "pop %rax\n\t"
2626 /* jmp, but don't trust the assembler to choose the right jump */
2627 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2628 ".Lamd64_le_fallthru:\n\t"
2629 "lea 0x8(%rsp),%rsp\n\t"
2630 "pop %rax");
2631
2632 if (offset_p)
2633 *offset_p = 13;
2634 if (size_p)
2635 *size_p = 4;
2636 }
2637
2638 void
2639 amd64_emit_gt_goto (int *offset_p, int *size_p)
2640 {
2641 EMIT_ASM (amd64_gt,
2642 "cmp %rax,(%rsp)\n\t"
2643 "jng .Lamd64_gt_fallthru\n\t"
2644 "lea 0x8(%rsp),%rsp\n\t"
2645 "pop %rax\n\t"
2646 /* jmp, but don't trust the assembler to choose the right jump */
2647 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2648 ".Lamd64_gt_fallthru:\n\t"
2649 "lea 0x8(%rsp),%rsp\n\t"
2650 "pop %rax");
2651
2652 if (offset_p)
2653 *offset_p = 13;
2654 if (size_p)
2655 *size_p = 4;
2656 }
2657
2658 void
2659 amd64_emit_ge_goto (int *offset_p, int *size_p)
2660 {
2661 EMIT_ASM (amd64_ge,
2662 "cmp %rax,(%rsp)\n\t"
2663 "jnge .Lamd64_ge_fallthru\n\t"
2664 ".Lamd64_ge_jump:\n\t"
2665 "lea 0x8(%rsp),%rsp\n\t"
2666 "pop %rax\n\t"
2667 /* jmp, but don't trust the assembler to choose the right jump */
2668 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2669 ".Lamd64_ge_fallthru:\n\t"
2670 "lea 0x8(%rsp),%rsp\n\t"
2671 "pop %rax");
2672
2673 if (offset_p)
2674 *offset_p = 13;
2675 if (size_p)
2676 *size_p = 4;
2677 }
2678
2679 struct emit_ops amd64_emit_ops =
2680 {
2681 amd64_emit_prologue,
2682 amd64_emit_epilogue,
2683 amd64_emit_add,
2684 amd64_emit_sub,
2685 amd64_emit_mul,
2686 amd64_emit_lsh,
2687 amd64_emit_rsh_signed,
2688 amd64_emit_rsh_unsigned,
2689 amd64_emit_ext,
2690 amd64_emit_log_not,
2691 amd64_emit_bit_and,
2692 amd64_emit_bit_or,
2693 amd64_emit_bit_xor,
2694 amd64_emit_bit_not,
2695 amd64_emit_equal,
2696 amd64_emit_less_signed,
2697 amd64_emit_less_unsigned,
2698 amd64_emit_ref,
2699 amd64_emit_if_goto,
2700 amd64_emit_goto,
2701 amd64_write_goto_address,
2702 amd64_emit_const,
2703 amd64_emit_call,
2704 amd64_emit_reg,
2705 amd64_emit_pop,
2706 amd64_emit_stack_flush,
2707 amd64_emit_zero_ext,
2708 amd64_emit_swap,
2709 amd64_emit_stack_adjust,
2710 amd64_emit_int_call_1,
2711 amd64_emit_void_call_2,
2712 amd64_emit_eq_goto,
2713 amd64_emit_ne_goto,
2714 amd64_emit_lt_goto,
2715 amd64_emit_le_goto,
2716 amd64_emit_gt_goto,
2717 amd64_emit_ge_goto
2718 };
2719
2720 #endif /* __x86_64__ */
2721
2722 static void
2723 i386_emit_prologue (void)
2724 {
2725 EMIT_ASM32 (i386_prologue,
2726 "push %ebp\n\t"
2727 "mov %esp,%ebp\n\t"
2728 "push %ebx");
2729 /* At this point, the raw regs base address is at 8(%ebp), and the
2730 value pointer is at 12(%ebp). */
2731 }
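
/* As on amd64, a convention runs through the 32-bit emitters below
   (inferred from the code): the 64-bit value on top of the
   agent-expression stack is cached in the %ebx:%eax pair (high:low),
   and deeper entries occupy two 4-byte machine-stack slots each, low
   word at the lower address; hence the add/adc and paired cmpl
   sequences.  */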
2732
2733 static void
2734 i386_emit_epilogue (void)
2735 {
2736 EMIT_ASM32 (i386_epilogue,
2737 "mov 12(%ebp),%ecx\n\t"
2738 "mov %eax,(%ecx)\n\t"
2739 "mov %ebx,0x4(%ecx)\n\t"
2740 "xor %eax,%eax\n\t"
2741 "pop %ebx\n\t"
2742 "pop %ebp\n\t"
2743 "ret");
2744 }
2745
2746 static void
2747 i386_emit_add (void)
2748 {
2749 EMIT_ASM32 (i386_add,
2750 "add (%esp),%eax\n\t"
2751 "adc 0x4(%esp),%ebx\n\t"
2752 "lea 0x8(%esp),%esp");
2753 }
2754
2755 static void
2756 i386_emit_sub (void)
2757 {
2758 EMIT_ASM32 (i386_sub,
2759 "subl %eax,(%esp)\n\t"
2760 "sbbl %ebx,4(%esp)\n\t"
2761 "pop %eax\n\t"
2762 "pop %ebx\n\t");
2763 }
2764
2765 static void
2766 i386_emit_mul (void)
2767 {
2768 emit_error = 1;
2769 }
2770
2771 static void
2772 i386_emit_lsh (void)
2773 {
2774 emit_error = 1;
2775 }
2776
2777 static void
2778 i386_emit_rsh_signed (void)
2779 {
2780 emit_error = 1;
2781 }
2782
2783 static void
2784 i386_emit_rsh_unsigned (void)
2785 {
2786 emit_error = 1;
2787 }
2788
2789 static void
2790 i386_emit_ext (int arg)
2791 {
2792 switch (arg)
2793 {
2794 case 8:
2795 EMIT_ASM32 (i386_ext_8,
2796 "cbtw\n\t"
2797 "cwtl\n\t"
2798 "movl %eax,%ebx\n\t"
2799 "sarl $31,%ebx");
2800 break;
2801 case 16:
2802 EMIT_ASM32 (i386_ext_16,
2803 "cwtl\n\t"
2804 "movl %eax,%ebx\n\t"
2805 "sarl $31,%ebx");
2806 break;
2807 case 32:
2808 EMIT_ASM32 (i386_ext_32,
2809 "movl %eax,%ebx\n\t"
2810 "sarl $31,%ebx");
2811 break;
2812 default:
2813 emit_error = 1;
2814 }
2815 }
2816
2817 static void
2818 i386_emit_log_not (void)
2819 {
2820 EMIT_ASM32 (i386_log_not,
2821 "or %ebx,%eax\n\t"
2822 "test %eax,%eax\n\t"
2823 "sete %cl\n\t"
2824 "xor %ebx,%ebx\n\t"
2825 "movzbl %cl,%eax");
2826 }
2827
2828 static void
2829 i386_emit_bit_and (void)
2830 {
2831 EMIT_ASM32 (i386_and,
2832 "and (%esp),%eax\n\t"
2833 "and 0x4(%esp),%ebx\n\t"
2834 "lea 0x8(%esp),%esp");
2835 }
2836
2837 static void
2838 i386_emit_bit_or (void)
2839 {
2840 EMIT_ASM32 (i386_or,
2841 "or (%esp),%eax\n\t"
2842 "or 0x4(%esp),%ebx\n\t"
2843 "lea 0x8(%esp),%esp");
2844 }
2845
2846 static void
2847 i386_emit_bit_xor (void)
2848 {
2849 EMIT_ASM32 (i386_xor,
2850 "xor (%esp),%eax\n\t"
2851 "xor 0x4(%esp),%ebx\n\t"
2852 "lea 0x8(%esp),%esp");
2853 }
2854
2855 static void
2856 i386_emit_bit_not (void)
2857 {
2858 EMIT_ASM32 (i386_bit_not,
2859 "xor $0xffffffff,%eax\n\t"
2860 "xor $0xffffffff,%ebx\n\t");
2861 }
2862
2863 static void
2864 i386_emit_equal (void)
2865 {
2866 EMIT_ASM32 (i386_equal,
2867 "cmpl %ebx,4(%esp)\n\t"
2868 "jne .Li386_equal_false\n\t"
2869 "cmpl %eax,(%esp)\n\t"
2870 "je .Li386_equal_true\n\t"
2871 ".Li386_equal_false:\n\t"
2872 "xor %eax,%eax\n\t"
2873 "jmp .Li386_equal_end\n\t"
2874 ".Li386_equal_true:\n\t"
2875 "mov $1,%eax\n\t"
2876 ".Li386_equal_end:\n\t"
2877 "xor %ebx,%ebx\n\t"
2878 "lea 0x8(%esp),%esp");
2879 }
2880
2881 static void
2882 i386_emit_less_signed (void)
2883 {
2884 EMIT_ASM32 (i386_less_signed,
2885 "cmpl %ebx,4(%esp)\n\t"
2886 "jl .Li386_less_signed_true\n\t"
2887 "jne .Li386_less_signed_false\n\t"
2888 "cmpl %eax,(%esp)\n\t"
2889 "jl .Li386_less_signed_true\n\t"
2890 ".Li386_less_signed_false:\n\t"
2891 "xor %eax,%eax\n\t"
2892 "jmp .Li386_less_signed_end\n\t"
2893 ".Li386_less_signed_true:\n\t"
2894 "mov $1,%eax\n\t"
2895 ".Li386_less_signed_end:\n\t"
2896 "xor %ebx,%ebx\n\t"
2897 "lea 0x8(%esp),%esp");
2898 }
2899
2900 static void
2901 i386_emit_less_unsigned (void)
2902 {
2903 EMIT_ASM32 (i386_less_unsigned,
2904 "cmpl %ebx,4(%esp)\n\t"
2905 "jb .Li386_less_unsigned_true\n\t"
2906 "jne .Li386_less_unsigned_false\n\t"
2907 "cmpl %eax,(%esp)\n\t"
2908 "jb .Li386_less_unsigned_true\n\t"
2909 ".Li386_less_unsigned_false:\n\t"
2910 "xor %eax,%eax\n\t"
2911 "jmp .Li386_less_unsigned_end\n\t"
2912 ".Li386_less_unsigned_true:\n\t"
2913 "mov $1,%eax\n\t"
2914 ".Li386_less_unsigned_end:\n\t"
2915 "xor %ebx,%ebx\n\t"
2916 "lea 0x8(%esp),%esp");
2917 }
2918
2919 static void
2920 i386_emit_ref (int size)
2921 {
2922 switch (size)
2923 {
2924 case 1:
2925 EMIT_ASM32 (i386_ref1,
2926 "movb (%eax),%al");
2927 break;
2928 case 2:
2929 EMIT_ASM32 (i386_ref2,
2930 "movw (%eax),%ax");
2931 break;
2932 case 4:
2933 EMIT_ASM32 (i386_ref4,
2934 "movl (%eax),%eax");
2935 break;
2936 case 8:
2937 EMIT_ASM32 (i386_ref8,
2938 "movl 4(%eax),%ebx\n\t"
2939 "movl (%eax),%eax");
2940 break;
2941 }
2942 }
2943
2944 static void
2945 i386_emit_if_goto (int *offset_p, int *size_p)
2946 {
2947 EMIT_ASM32 (i386_if_goto,
2948 "mov %eax,%ecx\n\t"
2949 "or %ebx,%ecx\n\t"
2950 "pop %eax\n\t"
2951 "pop %ebx\n\t"
2952 "cmpl $0,%ecx\n\t"
2953 /* Don't trust the assembler to choose the right jump */
2954 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2955
2956 if (offset_p)
2957 *offset_p = 11; /* be sure that this matches the sequence above */
2958 if (size_p)
2959 *size_p = 4;
2960 }
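
/* As in the amd64 version, the offset can be verified by decoding:
   mov %eax,%ecx (2 bytes), or %ebx,%ecx (2), pop %eax (1), pop %ebx
   (1), cmpl $0,%ecx (3), then the 0x0f 0x85 opcode (2), so the rel32
   field starts at byte 11.  */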
2961
2962 static void
2963 i386_emit_goto (int *offset_p, int *size_p)
2964 {
2965 EMIT_ASM32 (i386_goto,
2966 /* Don't trust the assembler to choose the right jump */
2967 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2968 if (offset_p)
2969 *offset_p = 1;
2970 if (size_p)
2971 *size_p = 4;
2972 }
2973
2974 static void
2975 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2976 {
2977 int diff = (to - (from + size));
2978 unsigned char buf[sizeof (int)];
2979
2980 /* We're only doing 4-byte sizes at the moment. */
2981 if (size != 4)
2982 {
2983 emit_error = 1;
2984 return;
2985 }
2986
2987 memcpy (buf, &diff, sizeof (int));
2988 write_inferior_memory (from, buf, sizeof (int));
2989 }
2990
2991 static void
2992 i386_emit_const (LONGEST num)
2993 {
2994 unsigned char buf[16];
2995 int i, hi, lo;
2996 CORE_ADDR buildaddr = current_insn_ptr;
2997
2998 i = 0;
2999 buf[i++] = 0xb8; /* mov $<n>,%eax */
3000 lo = num & 0xffffffff;
3001 memcpy (&buf[i], &lo, sizeof (lo));
3002 i += 4;
3003 hi = ((num >> 32) & 0xffffffff);
3004 if (hi)
3005 {
3006 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3007 memcpy (&buf[i], &hi, sizeof (hi));
3008 i += 4;
3009 }
3010 else
3011 {
3012 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3013 }
3014 append_insns (&buildaddr, i, buf);
3015 current_insn_ptr = buildaddr;
3016 }
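
/* For example, num = 0x100000002 emits b8 02 00 00 00 (mov $2,%eax)
   followed by bb 01 00 00 00 (mov $1,%ebx), while a value whose high
   half is zero gets 31 db (xor %ebx,%ebx) instead of the second
   mov.  */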
3017
3018 static void
3019 i386_emit_call (CORE_ADDR fn)
3020 {
3021 unsigned char buf[16];
3022 int i, offset;
3023 CORE_ADDR buildaddr;
3024
3025 buildaddr = current_insn_ptr;
3026 i = 0;
3027 buf[i++] = 0xe8; /* call <reladdr> */
3028 offset = ((int) fn) - (buildaddr + 5);
3029 memcpy (buf + 1, &offset, 4);
3030 append_insns (&buildaddr, 5, buf);
3031 current_insn_ptr = buildaddr;
3032 }
3033
3034 static void
3035 i386_emit_reg (int reg)
3036 {
3037 unsigned char buf[16];
3038 int i;
3039 CORE_ADDR buildaddr;
3040
3041 EMIT_ASM32 (i386_reg_a,
3042 "sub $0x8,%esp");
3043 buildaddr = current_insn_ptr;
3044 i = 0;
3045 buf[i++] = 0xb8; /* mov $<n>,%eax */
3046 memcpy (&buf[i], &reg, sizeof (reg));
3047 i += 4;
3048 append_insns (&buildaddr, i, buf);
3049 current_insn_ptr = buildaddr;
3050 EMIT_ASM32 (i386_reg_b,
3051 "mov %eax,4(%esp)\n\t"
3052 "mov 8(%ebp),%eax\n\t"
3053 "mov %eax,(%esp)");
3054 i386_emit_call (get_raw_reg_func_addr ());
3055 EMIT_ASM32 (i386_reg_c,
3056 "xor %ebx,%ebx\n\t"
3057 "lea 0x8(%esp),%esp");
3058 }
3059
3060 static void
3061 i386_emit_pop (void)
3062 {
3063 EMIT_ASM32 (i386_pop,
3064 "pop %eax\n\t"
3065 "pop %ebx");
3066 }
3067
3068 static void
3069 i386_emit_stack_flush (void)
3070 {
3071 EMIT_ASM32 (i386_stack_flush,
3072 "push %ebx\n\t"
3073 "push %eax");
3074 }
3075
3076 static void
3077 i386_emit_zero_ext (int arg)
3078 {
3079 switch (arg)
3080 {
3081 case 8:
3082 EMIT_ASM32 (i386_zero_ext_8,
3083 "and $0xff,%eax\n\t"
3084 "xor %ebx,%ebx");
3085 break;
3086 case 16:
3087 EMIT_ASM32 (i386_zero_ext_16,
3088 "and $0xffff,%eax\n\t"
3089 "xor %ebx,%ebx");
3090 break;
3091 case 32:
3092 EMIT_ASM32 (i386_zero_ext_32,
3093 "xor %ebx,%ebx");
3094 break;
3095 default:
3096 emit_error = 1;
3097 }
3098 }
3099
3100 static void
3101 i386_emit_swap (void)
3102 {
3103 EMIT_ASM32 (i386_swap,
3104 "mov %eax,%ecx\n\t"
3105 "mov %ebx,%edx\n\t"
3106 "pop %eax\n\t"
3107 "pop %ebx\n\t"
3108 "push %edx\n\t"
3109 "push %ecx");
3110 }
3111
3112 static void
3113 i386_emit_stack_adjust (int n)
3114 {
3115 unsigned char buf[16];
3116 int i;
3117 CORE_ADDR buildaddr = current_insn_ptr;
3118
3119 i = 0;
3120 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3121 buf[i++] = 0x64;
3122 buf[i++] = 0x24;
3123 buf[i++] = n * 8;
3124 append_insns (&buildaddr, i, buf);
3125 current_insn_ptr = buildaddr;
3126 }
3127
3128 /* FN's prototype is `LONGEST(*fn)(int)'. */
3129
3130 static void
3131 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3132 {
3133 unsigned char buf[16];
3134 int i;
3135 CORE_ADDR buildaddr;
3136
3137 EMIT_ASM32 (i386_int_call_1_a,
3138 /* Reserve a bit of stack space. */
3139 "sub $0x8,%esp");
3140 /* Put the one argument on the stack. */
3141 buildaddr = current_insn_ptr;
3142 i = 0;
3143 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3144 buf[i++] = 0x04;
3145 buf[i++] = 0x24;
3146 memcpy (&buf[i], &arg1, sizeof (arg1));
3147 i += 4;
3148 append_insns (&buildaddr, i, buf);
3149 current_insn_ptr = buildaddr;
3150 i386_emit_call (fn);
3151 EMIT_ASM32 (i386_int_call_1_c,
3152 "mov %edx,%ebx\n\t"
3153 "lea 0x8(%esp),%esp");
3154 }
3155
3156 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3157
3158 static void
3159 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3160 {
3161 unsigned char buf[16];
3162 int i;
3163 CORE_ADDR buildaddr;
3164
3165 EMIT_ASM32 (i386_void_call_2_a,
3166 /* Preserve %eax only; we don't have to worry about %ebx. */
3167 "push %eax\n\t"
3168 /* Reserve a bit of stack space for arguments. */
3169 "sub $0x10,%esp\n\t"
3170 		/* Copy "top" to the second argument position.  (Note that
3171 		   we can't assume the function won't scribble on its
3172 		   arguments, so don't try to restore from this.)  */
3173 "mov %eax,4(%esp)\n\t"
3174 "mov %ebx,8(%esp)");
3175 /* Put the first argument on the stack. */
3176 buildaddr = current_insn_ptr;
3177 i = 0;
3178 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3179 buf[i++] = 0x04;
3180 buf[i++] = 0x24;
3181 memcpy (&buf[i], &arg1, sizeof (arg1));
3182 i += 4;
3183 append_insns (&buildaddr, i, buf);
3184 current_insn_ptr = buildaddr;
3185 i386_emit_call (fn);
3186 EMIT_ASM32 (i386_void_call_2_b,
3187 "lea 0x10(%esp),%esp\n\t"
3188 /* Restore original stack top. */
3189 "pop %eax");
3190 }
3191
3192
3193 void
3194 i386_emit_eq_goto (int *offset_p, int *size_p)
3195 {
3196 EMIT_ASM32 (eq,
3197 	      /* Check the low half first; it is more likely to be the decider. */
3198 "cmpl %eax,(%esp)\n\t"
3199 "jne .Leq_fallthru\n\t"
3200 "cmpl %ebx,4(%esp)\n\t"
3201 "jne .Leq_fallthru\n\t"
3202 "lea 0x8(%esp),%esp\n\t"
3203 "pop %eax\n\t"
3204 "pop %ebx\n\t"
3205 /* jmp, but don't trust the assembler to choose the right jump */
3206 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3207 ".Leq_fallthru:\n\t"
3208 "lea 0x8(%esp),%esp\n\t"
3209 "pop %eax\n\t"
3210 "pop %ebx");
3211
3212 if (offset_p)
3213 *offset_p = 18;
3214 if (size_p)
3215 *size_p = 4;
3216 }
3217
3218 void
3219 i386_emit_ne_goto (int *offset_p, int *size_p)
3220 {
3221 EMIT_ASM32 (ne,
3222 	      /* Check the low half first; it is more likely to be the decider. */
3223 "cmpl %eax,(%esp)\n\t"
3224 "jne .Lne_jump\n\t"
3225 "cmpl %ebx,4(%esp)\n\t"
3226 "je .Lne_fallthru\n\t"
3227 ".Lne_jump:\n\t"
3228 "lea 0x8(%esp),%esp\n\t"
3229 "pop %eax\n\t"
3230 "pop %ebx\n\t"
3231 /* jmp, but don't trust the assembler to choose the right jump */
3232 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3233 ".Lne_fallthru:\n\t"
3234 "lea 0x8(%esp),%esp\n\t"
3235 "pop %eax\n\t"
3236 "pop %ebx");
3237
3238 if (offset_p)
3239 *offset_p = 18;
3240 if (size_p)
3241 *size_p = 4;
3242 }
3243
3244 void
3245 i386_emit_lt_goto (int *offset_p, int *size_p)
3246 {
3247 EMIT_ASM32 (lt,
3248 "cmpl %ebx,4(%esp)\n\t"
3249 "jl .Llt_jump\n\t"
3250 "jne .Llt_fallthru\n\t"
3251 "cmpl %eax,(%esp)\n\t"
3252 "jnl .Llt_fallthru\n\t"
3253 ".Llt_jump:\n\t"
3254 "lea 0x8(%esp),%esp\n\t"
3255 "pop %eax\n\t"
3256 "pop %ebx\n\t"
3257 /* jmp, but don't trust the assembler to choose the right jump */
3258 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3259 ".Llt_fallthru:\n\t"
3260 "lea 0x8(%esp),%esp\n\t"
3261 "pop %eax\n\t"
3262 "pop %ebx");
3263
3264 if (offset_p)
3265 *offset_p = 20;
3266 if (size_p)
3267 *size_p = 4;
3268 }
3269
3270 void
3271 i386_emit_le_goto (int *offset_p, int *size_p)
3272 {
3273 EMIT_ASM32 (le,
3274 "cmpl %ebx,4(%esp)\n\t"
3275 "jle .Lle_jump\n\t"
3276 "jne .Lle_fallthru\n\t"
3277 "cmpl %eax,(%esp)\n\t"
3278 "jnle .Lle_fallthru\n\t"
3279 ".Lle_jump:\n\t"
3280 "lea 0x8(%esp),%esp\n\t"
3281 "pop %eax\n\t"
3282 "pop %ebx\n\t"
3283 /* jmp, but don't trust the assembler to choose the right jump */
3284 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3285 ".Lle_fallthru:\n\t"
3286 "lea 0x8(%esp),%esp\n\t"
3287 "pop %eax\n\t"
3288 "pop %ebx");
3289
3290 if (offset_p)
3291 *offset_p = 20;
3292 if (size_p)
3293 *size_p = 4;
3294 }
3295
3296 void
3297 i386_emit_gt_goto (int *offset_p, int *size_p)
3298 {
3299 EMIT_ASM32 (gt,
3300 "cmpl %ebx,4(%esp)\n\t"
3301 "jg .Lgt_jump\n\t"
3302 "jne .Lgt_fallthru\n\t"
3303 "cmpl %eax,(%esp)\n\t"
3304 "jng .Lgt_fallthru\n\t"
3305 ".Lgt_jump:\n\t"
3306 "lea 0x8(%esp),%esp\n\t"
3307 "pop %eax\n\t"
3308 "pop %ebx\n\t"
3309 /* jmp, but don't trust the assembler to choose the right jump */
3310 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3311 ".Lgt_fallthru:\n\t"
3312 "lea 0x8(%esp),%esp\n\t"
3313 "pop %eax\n\t"
3314 "pop %ebx");
3315
3316 if (offset_p)
3317 *offset_p = 20;
3318 if (size_p)
3319 *size_p = 4;
3320 }
3321
3322 void
3323 i386_emit_ge_goto (int *offset_p, int *size_p)
3324 {
3325 EMIT_ASM32 (ge,
3326 "cmpl %ebx,4(%esp)\n\t"
3327 "jge .Lge_jump\n\t"
3328 "jne .Lge_fallthru\n\t"
3329 "cmpl %eax,(%esp)\n\t"
3330 "jnge .Lge_fallthru\n\t"
3331 ".Lge_jump:\n\t"
3332 "lea 0x8(%esp),%esp\n\t"
3333 "pop %eax\n\t"
3334 "pop %ebx\n\t"
3335 /* jmp, but don't trust the assembler to choose the right jump */
3336 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3337 ".Lge_fallthru:\n\t"
3338 "lea 0x8(%esp),%esp\n\t"
3339 "pop %eax\n\t"
3340 "pop %ebx");
3341
3342 if (offset_p)
3343 *offset_p = 20;
3344 if (size_p)
3345 *size_p = 4;
3346 }
3347
3348 struct emit_ops i386_emit_ops =
3349 {
3350 i386_emit_prologue,
3351 i386_emit_epilogue,
3352 i386_emit_add,
3353 i386_emit_sub,
3354 i386_emit_mul,
3355 i386_emit_lsh,
3356 i386_emit_rsh_signed,
3357 i386_emit_rsh_unsigned,
3358 i386_emit_ext,
3359 i386_emit_log_not,
3360 i386_emit_bit_and,
3361 i386_emit_bit_or,
3362 i386_emit_bit_xor,
3363 i386_emit_bit_not,
3364 i386_emit_equal,
3365 i386_emit_less_signed,
3366 i386_emit_less_unsigned,
3367 i386_emit_ref,
3368 i386_emit_if_goto,
3369 i386_emit_goto,
3370 i386_write_goto_address,
3371 i386_emit_const,
3372 i386_emit_call,
3373 i386_emit_reg,
3374 i386_emit_pop,
3375 i386_emit_stack_flush,
3376 i386_emit_zero_ext,
3377 i386_emit_swap,
3378 i386_emit_stack_adjust,
3379 i386_emit_int_call_1,
3380 i386_emit_void_call_2,
3381 i386_emit_eq_goto,
3382 i386_emit_ne_goto,
3383 i386_emit_lt_goto,
3384 i386_emit_le_goto,
3385 i386_emit_gt_goto,
3386 i386_emit_ge_goto
3387 };
3388
3389
3390 static struct emit_ops *
3391 x86_emit_ops (void)
3392 {
3393 #ifdef __x86_64__
3394 if (is_64bit_tdesc ())
3395 return &amd64_emit_ops;
3396 else
3397 #endif
3398 return &i386_emit_ops;
3399 }
3400
3401 static int
3402 x86_supports_range_stepping (void)
3403 {
3404 return 1;
3405 }
3406
3407 /* This is initialized assuming an amd64 target.
3408    x86_arch_setup will correct it for an i386 target.  */
3409
3410 struct linux_target_ops the_low_target =
3411 {
3412 x86_arch_setup,
3413 x86_linux_regs_info,
3414 x86_cannot_fetch_register,
3415 x86_cannot_store_register,
3416 NULL, /* fetch_register */
3417 x86_get_pc,
3418 x86_set_pc,
3419 x86_breakpoint,
3420 x86_breakpoint_len,
3421 NULL,
3422 1,
3423 x86_breakpoint_at,
3424 x86_supports_z_point_type,
3425 x86_insert_point,
3426 x86_remove_point,
3427 x86_stopped_by_watchpoint,
3428 x86_stopped_data_address,
3429 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3430 native i386 case (no registers smaller than an xfer unit), and are not
3431 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3432 NULL,
3433 NULL,
3434 /* need to fix up i386 siginfo if host is amd64 */
3435 x86_siginfo_fixup,
3436 x86_linux_new_process,
3437 x86_linux_new_thread,
3438 x86_linux_prepare_to_resume,
3439 x86_linux_process_qsupported,
3440 x86_supports_tracepoints,
3441 x86_get_thread_area,
3442 x86_install_fast_tracepoint_jump_pad,
3443 x86_emit_ops,
3444 x86_get_min_fast_tracepoint_insn_len,
3445 x86_supports_range_stepping,
3446 };
3447
3448 void
3449 initialize_low_arch (void)
3450 {
3451 /* Initialize the Linux target descriptions. */
3452 #ifdef __x86_64__
3453 init_registers_amd64_linux ();
3454 init_registers_amd64_avx_linux ();
3455 init_registers_amd64_avx512_linux ();
3456 init_registers_amd64_mpx_linux ();
3457
3458 init_registers_x32_linux ();
3459 init_registers_x32_avx_linux ();
3460 init_registers_x32_avx512_linux ();
3461
3462 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3463 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3464 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3465 #endif
3466 init_registers_i386_linux ();
3467 init_registers_i386_mmx_linux ();
3468 init_registers_i386_avx_linux ();
3469 init_registers_i386_avx512_linux ();
3470 init_registers_i386_mpx_linux ();
3471
3472 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3473 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3474 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3475
3476 initialize_regsets_info (&x86_regsets_info);
3477 }