/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
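/* jump_insn is "jmp rel32" (0xe9 plus a 4-byte displacement, 5 bytes
   total); small_jump_insn is the operand-size-prefixed "jmp rel16"
   (0x66 0xe9 plus a 2-byte displacement, 4 bytes), used by the fast
   tracepoint code below when only four bytes may be overwritten at
   the tracepoint site.  */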

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
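/* A hedged usage sketch (mirroring what ps_get_thread_area below
   does): reading the FS base of a stopped 64-bit thread.

       void *base;
       if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
         ... base now holds the thread's FS base address ...

   Note the operand order relative to arch_prctl(2): for the ptrace
   request the address pointer travels in ptrace's ADDR slot and the
   ARCH_* sub-function code in its DATA slot.  */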

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                      /* MPX registers BND0 ... BND3.  */
  -1, -1,                              /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,      /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,      /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,      /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,      /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* The low 3 bits of the GS selector
                                      hold RPL/TI; shift them off to
                                      get the GDT index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
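
/* For reference: PTRACE_GET_THREAD_AREA fills a four-word block laid
   out like glibc's `struct user_desc'; desc[1] above is its
   `base_addr' field.  A hedged sketch of the equivalent decode, if
   one were willing to depend on the <asm/ldt.h> header:

       struct user_desc ud;
       if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                   (void *) (intptr_t) idx, &ud) == 0)
         *addr = ud.base_addr;

   The bare array form used above avoids that header dependency.  */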


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
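
/* Illustrative only (the generic linux-low.c code performs the
   actual transfers): a PTRACE_GETREGSET entry such as the
   NT_X86_XSTATE row above is fetched with an iovec, roughly

       struct iovec iov = { buf, regset->size };
       ptrace (PTRACE_GETREGSET, tid,
               (void *) (long) regset->nt_type, &iov);

   and on success the kernel trims iov.iov_len to the number of bytes
   it actually wrote.  The table here only supplies sizes, note types
   and the fill/store callbacks.  */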

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
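
/* Background note (a sketch, not exercised by this file): the
   u_debugreg offsets above resolve to byte offsets inside `struct
   user', e.g. DR7 on x86-64 lives at

       offsetof (struct user, u_debugreg[7])

   The kernel validates POKEUSER writes to these slots; setting
   reserved DR7 bits or an unsuitable address in DR0-DR3 makes the
   call fail (typically with EINVAL, which surfaces to the user as
   the "Couldn't write debug register" error above).  */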

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
i386_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_inferior);

  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
i386_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
i386_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

static unsigned long
i386_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Low-level function vector.  */
struct i386_dr_low_type i386_dr_low =
  {
    i386_dr_low_set_control,
    i386_dr_low_set_addr,
    i386_dr_low_get_addr,
    i386_dr_low_get_status,
    i386_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct i386_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return i386_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct i386_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return i386_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                    &addr))
    return addr;
  return 0;
}
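
/* Background sketch (the encoding itself lives in i386-low.c, not
   here): DR7 packs a local/global enable bit pair per debug register
   in bits 0-7, plus a 4-bit R/W+LEN field per register starting at
   bit 16.  A 4-byte write watchpoint in DR0 ends up encoded roughly
   as

       control |= 0x1;            L0: local-enable DR0
       control |= 0x1 << 16;      R/W0 = 01, break on data writes
       control |= 0x3 << 18;      LEN0 = 11, 4-byte region

   i386_dr_insert_watchpoint maintains this value in
   state->dr_control_mirror; x86_linux_prepare_to_resume below writes
   it into each thread.  */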
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid or si_overrun;
   in their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the
   OS-enabled extended state mask, which is the same as the extended
   control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
   can use this mask together with the mask saved in the
   xstate_hdr_bytes to determine what states the processor/OS supports
   and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
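
/* Sketch only (BUF is a hypothetical variable): given a raw XSAVE
   buffer laid out as above, the OS-enabled feature mask can be pulled
   out like so:

       uint64_t xcr0;
       memcpy (&xcr0, (char *) buf + I386_LINUX_XSAVE_XCR0_OFFSET,
               sizeof xcr0);
       if ((xcr0 & I386_XSTATE_AVX) != 0)
         ... the OS saves/restores YMM state ...

   x86_linux_read_description below reads the same slot out of its
   xstateregs array by index instead.  */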

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from the running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = I386_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & I386_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case I386_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case I386_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & I386_XSTATE_ALL_MASK)
                {
                case I386_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case I386_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case I386_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & I386_XSTATE_ALL_MASK)
            {
            case (I386_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (I386_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (I386_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_inferior
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}

/* Process the qSupported query's "xmlRegisters=" feature, and update
   the buffer size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Default to no XML support.  If GDB sends "xmlRegisters=" with
     "i386" among the entries in its qSupported query, it supports
     x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
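
/* For example, a GDB that understands x86 XML descriptions announces
   something like "xmlRegisters=i386" in qSupported (a multi-arch GDB
   may send a comma-separated list, e.g. "xmlRegisters=i386,mips");
   the token loop above matches the "i386" entry and turns use_xml
   on.  */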

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
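
/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3.  OP is a
   string of whitespace-separated hex bytes, the format used by the
   jump-pad builders below.  */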

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes into JJUMP_PAD_INSN the jump instruction that
   redirects execution to the jump pad; the caller is responsible
   for writing it in at the tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes into JJUMP_PAD_INSN the jump instruction that
   redirects execution to the jump pad; the caller is responsible
   for writing it in at the tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386; if we cared about that CPU, we could use xchg
     instead.  */
1870
1871 i = 0;
1872 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1873 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1874 %esp,<lockaddr> */
1875 memcpy (&buf[i], (void *) &lockaddr, 4);
1876 i += 4;
1877 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1878 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1879 append_insns (&buildaddr, i, buf);
1880
1881
1882 /* Set up arguments to the gdb_collect call. */
1883 i = 0;
1884 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1885 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1886 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1887 append_insns (&buildaddr, i, buf);
1888
1889 i = 0;
1890 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1891 append_insns (&buildaddr, i, buf);
1892
1893 i = 0;
1894 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1895 memcpy (&buf[i], (void *) &tpoint, 4);
1896 i += 4;
1897 append_insns (&buildaddr, i, buf);
1898
1899 buf[0] = 0xe8; /* call <reladdr> */
1900 offset = collector - (buildaddr + sizeof (jump_insn));
1901 memcpy (buf + 1, &offset, 4);
1902 append_insns (&buildaddr, 5, buf);
1903 /* Clean up after the call. */
1904 buf[0] = 0x83; /* add $0x8,%esp */
1905 buf[1] = 0xc4;
1906 buf[2] = 0x08;
1907 append_insns (&buildaddr, 3, buf);
1908
1909
1910 /* Clear the spin-lock. This would need the LOCK prefix on older
1911 broken archs. */
1912 i = 0;
1913 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1914 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1915 memcpy (buf + i, &lockaddr, 4);
1916 i += 4;
1917 append_insns (&buildaddr, i, buf);
1918
1919
1920 /* Remove stack that had been used for the collect_t object. */
1921 i = 0;
1922 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1923 append_insns (&buildaddr, i, buf);
1924
1925 i = 0;
1926 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1927 buf[i++] = 0xc4;
1928 buf[i++] = 0x04;
1929 buf[i++] = 0x17; /* pop %ss */
1930 buf[i++] = 0x0f; /* pop %gs */
1931 buf[i++] = 0xa9;
1932 buf[i++] = 0x0f; /* pop %fs */
1933 buf[i++] = 0xa1;
1934 buf[i++] = 0x07; /* pop %es */
1935 buf[i++] = 0x1f; /* pop %ds */
1936 buf[i++] = 0x9d; /* popf */
1937 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1938 buf[i++] = 0xc4;
1939 buf[i++] = 0x04;
1940 buf[i++] = 0x61; /* popad */
1941 append_insns (&buildaddr, i, buf);
1942
1943 /* Now, adjust the original instruction to execute in the jump
1944 pad. */
1945 *adjusted_insn_addr = buildaddr;
1946 relocate_instruction (&buildaddr, tpaddr);
1947 *adjusted_insn_addr_end = buildaddr;
1948
1949 /* Write the jump back to the program. */
1950 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1951 memcpy (buf, jump_insn, sizeof (jump_insn));
1952 memcpy (buf + 1, &offset, 4);
1953 append_insns (&buildaddr, sizeof (jump_insn), buf);
1954
1955 /* The jump pad is now built. Wire in a jump to our jump pad. This
1956 is always done last (by our caller actually), so that we can
1957 install fast tracepoints with threads running. This relies on
1958 the agent's atomic write support. */
1959 if (orig_size == 4)
1960 {
1961 /* Create a trampoline. */
1962 *trampoline_size = sizeof (jump_insn);
1963 if (!claim_trampoline_space (*trampoline_size, trampoline))
1964 {
1965 /* No trampoline space available. */
1966 strcpy (err,
1967 "E.Cannot allocate trampoline space needed for fast "
1968 "tracepoints on 4-byte instructions.");
1969 return 1;
1970 }
1971
1972 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1973 memcpy (buf, jump_insn, sizeof (jump_insn));
1974 memcpy (buf + 1, &offset, 4);
1975 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1976
1977 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1978 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1979 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1980 memcpy (buf + 2, &offset, 2);
1981 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1982 *jjump_pad_insn_size = sizeof (small_jump_insn);
1983 }
1984 else
1985 {
1986 /* Else use a 32-bit relative jump instruction. */
1987 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1988 memcpy (buf, jump_insn, sizeof (jump_insn));
1989 memcpy (buf + 1, &offset, 4);
1990 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1991 *jjump_pad_insn_size = sizeof (jump_insn);
1992 }
1993
1994 /* Return the end address of our pad. */
1995 *jump_entry = buildaddr;
1996
1997 return 0;
1998 }
1999
2000 static int
2001 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2002 CORE_ADDR collector,
2003 CORE_ADDR lockaddr,
2004 ULONGEST orig_size,
2005 CORE_ADDR *jump_entry,
2006 CORE_ADDR *trampoline,
2007 ULONGEST *trampoline_size,
2008 unsigned char *jjump_pad_insn,
2009 ULONGEST *jjump_pad_insn_size,
2010 CORE_ADDR *adjusted_insn_addr,
2011 CORE_ADDR *adjusted_insn_addr_end,
2012 char *err)
2013 {
2014 #ifdef __x86_64__
2015 if (is_64bit_tdesc ())
2016 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2017 collector, lockaddr,
2018 orig_size, jump_entry,
2019 trampoline, trampoline_size,
2020 jjump_pad_insn,
2021 jjump_pad_insn_size,
2022 adjusted_insn_addr,
2023 adjusted_insn_addr_end,
2024 err);
2025 #endif
2026
2027 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2028 collector, lockaddr,
2029 orig_size, jump_entry,
2030 trampoline, trampoline_size,
2031 jjump_pad_insn,
2032 jjump_pad_insn_size,
2033 adjusted_insn_addr,
2034 adjusted_insn_addr_end,
2035 err);
2036 }
2037
2038 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2039 architectures. */
2040
2041 static int
2042 x86_get_min_fast_tracepoint_insn_len (void)
2043 {
2044 static int warned_about_fast_tracepoints = 0;
2045
2046 #ifdef __x86_64__
2047 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2048 used for fast tracepoints. */
2049 if (is_64bit_tdesc ())
2050 return 5;
2051 #endif
2052
2053 if (agent_loaded_p ())
2054 {
2055 char errbuf[IPA_BUFSIZ];
2056
2057 errbuf[0] = '\0';
2058
2059 /* On x86, if trampolines are available, then 4-byte jump instructions
2060 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2061 with a 4-byte offset are used instead. */
2062 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2063 return 4;
2064 else
2065 {
2066	      /* GDB has no channel to explain to the user why a shorter fast
2067		 tracepoint is not possible, but at least make GDBserver
2068		 mention that something has gone awry. */
2069 if (!warned_about_fast_tracepoints)
2070 {
2071 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2072 warned_about_fast_tracepoints = 1;
2073 }
2074 return 5;
2075 }
2076 }
2077 else
2078 {
2079 /* Indicate that the minimum length is currently unknown since the IPA
2080 has not loaded yet. */
2081 return 0;
2082 }
2083 }
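
/* For reference: the 5-byte form is the usual `e9' jmp with a 32-bit
   displacement, while the 4-byte form is roughly `66 e9', an
   operand-size-prefixed jmp with only a 16-bit displacement -- which
   is why it can only reach a nearby trampoline rather than the jump
   pad itself.  */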
2084
2085 static void
2086 add_insns (unsigned char *start, int len)
2087 {
2088 CORE_ADDR buildaddr = current_insn_ptr;
2089
2090 if (debug_threads)
2091 debug_printf ("Adding %d bytes of insn at %s\n",
2092 len, paddress (buildaddr));
2093
2094 append_insns (&buildaddr, len, start);
2095 current_insn_ptr = buildaddr;
2096 }
2097
2098 /* Our general strategy for emitting code is to avoid specifying raw
2099 bytes whenever possible, and instead copy a block of inline asm
2100 that is embedded in the function. This is a little messy, because
2101 we need to keep the compiler from discarding what looks like dead
2102 code, plus suppress various warnings. */
2103
2104 #define EMIT_ASM(NAME, INSNS) \
2105 do \
2106 { \
2107 extern unsigned char start_ ## NAME, end_ ## NAME; \
2108 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2109 __asm__ ("jmp end_" #NAME "\n" \
2110 "\t" "start_" #NAME ":" \
2111 "\t" INSNS "\n" \
2112 "\t" "end_" #NAME ":"); \
2113 } while (0)
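
/* For example, EMIT_ASM (foo, "pop %rax") expands roughly to:

      extern unsigned char start_foo, end_foo;
      add_insns (&start_foo, &end_foo - &start_foo);
      __asm__ ("jmp end_foo\n\t"
	       "start_foo:\tpop %rax\n\t"
	       "end_foo:");

   The template bytes are assembled into gdbserver itself, skipped over
   at run time by the leading jmp, and copied out to the inferior's
   jump pad by add_insns.  */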
2114
2115 #ifdef __x86_64__
2116
2117 #define EMIT_ASM32(NAME,INSNS) \
2118 do \
2119 { \
2120 extern unsigned char start_ ## NAME, end_ ## NAME; \
2121 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2122 __asm__ (".code32\n" \
2123 "\t" "jmp end_" #NAME "\n" \
2124 "\t" "start_" #NAME ":\n" \
2125 "\t" INSNS "\n" \
2126 "\t" "end_" #NAME ":\n" \
2127 ".code64\n"); \
2128 } while (0)
2129
2130 #else
2131
2132 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2133
2134 #endif
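
/* The .code32/.code64 bracketing above matters when a 64-bit gdbserver
   controls a 32-bit inferior: the template must assemble to i386
   encodings, since its bytes are copied verbatim into the 32-bit
   process's jump pad.  */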
2135
2136 #ifdef __x86_64__
2137
2138 static void
2139 amd64_emit_prologue (void)
2140 {
2141 EMIT_ASM (amd64_prologue,
2142 "pushq %rbp\n\t"
2143 "movq %rsp,%rbp\n\t"
2144 "sub $0x20,%rsp\n\t"
2145 "movq %rdi,-8(%rbp)\n\t"
2146 "movq %rsi,-16(%rbp)");
2147 }
2148
2149
2150 static void
2151 amd64_emit_epilogue (void)
2152 {
2153 EMIT_ASM (amd64_epilogue,
2154 "movq -16(%rbp),%rdi\n\t"
2155 "movq %rax,(%rdi)\n\t"
2156 "xor %rax,%rax\n\t"
2157 "leave\n\t"
2158 "ret");
2159 }
2160
2161 static void
2162 amd64_emit_add (void)
2163 {
2164 EMIT_ASM (amd64_add,
2165 "add (%rsp),%rax\n\t"
2166 "lea 0x8(%rsp),%rsp");
2167 }
2168
2169 static void
2170 amd64_emit_sub (void)
2171 {
2172 EMIT_ASM (amd64_sub,
2173 "sub %rax,(%rsp)\n\t"
2174 "pop %rax");
2175 }
2176
2177 static void
2178 amd64_emit_mul (void)
2179 {
2180 emit_error = 1;
2181 }
2182
2183 static void
2184 amd64_emit_lsh (void)
2185 {
2186 emit_error = 1;
2187 }
2188
2189 static void
2190 amd64_emit_rsh_signed (void)
2191 {
2192 emit_error = 1;
2193 }
2194
2195 static void
2196 amd64_emit_rsh_unsigned (void)
2197 {
2198 emit_error = 1;
2199 }
2200
2201 static void
2202 amd64_emit_ext (int arg)
2203 {
2204 switch (arg)
2205 {
2206 case 8:
2207 EMIT_ASM (amd64_ext_8,
2208 "cbtw\n\t"
2209 "cwtl\n\t"
2210 "cltq");
2211 break;
2212 case 16:
2213 EMIT_ASM (amd64_ext_16,
2214 "cwtl\n\t"
2215 "cltq");
2216 break;
2217 case 32:
2218 EMIT_ASM (amd64_ext_32,
2219 "cltq");
2220 break;
2221 default:
2222 emit_error = 1;
2223 }
2224 }
2225
2226 static void
2227 amd64_emit_log_not (void)
2228 {
2229 EMIT_ASM (amd64_log_not,
2230 "test %rax,%rax\n\t"
2231 "sete %cl\n\t"
2232 "movzbq %cl,%rax");
2233 }
2234
2235 static void
2236 amd64_emit_bit_and (void)
2237 {
2238 EMIT_ASM (amd64_and,
2239 "and (%rsp),%rax\n\t"
2240 "lea 0x8(%rsp),%rsp");
2241 }
2242
2243 static void
2244 amd64_emit_bit_or (void)
2245 {
2246 EMIT_ASM (amd64_or,
2247 "or (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2249 }
2250
2251 static void
2252 amd64_emit_bit_xor (void)
2253 {
2254 EMIT_ASM (amd64_xor,
2255 "xor (%rsp),%rax\n\t"
2256 "lea 0x8(%rsp),%rsp");
2257 }
2258
2259 static void
2260 amd64_emit_bit_not (void)
2261 {
2262 EMIT_ASM (amd64_bit_not,
2263 "xorq $0xffffffffffffffff,%rax");
2264 }
2265
2266 static void
2267 amd64_emit_equal (void)
2268 {
2269 EMIT_ASM (amd64_equal,
2270 "cmp %rax,(%rsp)\n\t"
2271 "je .Lamd64_equal_true\n\t"
2272 "xor %rax,%rax\n\t"
2273 "jmp .Lamd64_equal_end\n\t"
2274 ".Lamd64_equal_true:\n\t"
2275 "mov $0x1,%rax\n\t"
2276 ".Lamd64_equal_end:\n\t"
2277 "lea 0x8(%rsp),%rsp");
2278 }
2279
2280 static void
2281 amd64_emit_less_signed (void)
2282 {
2283 EMIT_ASM (amd64_less_signed,
2284 "cmp %rax,(%rsp)\n\t"
2285 "jl .Lamd64_less_signed_true\n\t"
2286 "xor %rax,%rax\n\t"
2287 "jmp .Lamd64_less_signed_end\n\t"
2288 ".Lamd64_less_signed_true:\n\t"
2289 "mov $1,%rax\n\t"
2290 ".Lamd64_less_signed_end:\n\t"
2291 "lea 0x8(%rsp),%rsp");
2292 }
2293
2294 static void
2295 amd64_emit_less_unsigned (void)
2296 {
2297 EMIT_ASM (amd64_less_unsigned,
2298 "cmp %rax,(%rsp)\n\t"
2299 "jb .Lamd64_less_unsigned_true\n\t"
2300 "xor %rax,%rax\n\t"
2301 "jmp .Lamd64_less_unsigned_end\n\t"
2302 ".Lamd64_less_unsigned_true:\n\t"
2303 "mov $1,%rax\n\t"
2304 ".Lamd64_less_unsigned_end:\n\t"
2305 "lea 0x8(%rsp),%rsp");
2306 }
2307
2308 static void
2309 amd64_emit_ref (int size)
2310 {
2311 switch (size)
2312 {
2313 case 1:
2314 EMIT_ASM (amd64_ref1,
2315 "movb (%rax),%al");
2316 break;
2317 case 2:
2318 EMIT_ASM (amd64_ref2,
2319 "movw (%rax),%ax");
2320 break;
2321 case 4:
2322 EMIT_ASM (amd64_ref4,
2323 "movl (%rax),%eax");
2324 break;
2325 case 8:
2326 EMIT_ASM (amd64_ref8,
2327 "movq (%rax),%rax");
2328 break;
2329 }
2330 }
2331
2332 static void
2333 amd64_emit_if_goto (int *offset_p, int *size_p)
2334 {
2335 EMIT_ASM (amd64_if_goto,
2336 "mov %rax,%rcx\n\t"
2337 "pop %rax\n\t"
2338 "cmp $0,%rcx\n\t"
2339 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2340 if (offset_p)
2341 *offset_p = 10;
2342 if (size_p)
2343 *size_p = 4;
2344 }
2345
2346 static void
2347 amd64_emit_goto (int *offset_p, int *size_p)
2348 {
2349 EMIT_ASM (amd64_goto,
2350 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2351 if (offset_p)
2352 *offset_p = 1;
2353 if (size_p)
2354 *size_p = 4;
2355 }
2356
2357 static void
2358 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2359 {
2360 int diff = (to - (from + size));
2361 unsigned char buf[sizeof (int)];
2362
2363 if (size != 4)
2364 {
2365 emit_error = 1;
2366 return;
2367 }
2368
2369 memcpy (buf, &diff, sizeof (int));
2370 write_inferior_memory (from, buf, sizeof (int));
2371 }
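
/* The goto emitters above hand-code the branch opcode with a zeroed
   32-bit displacement and report, via *OFFSET_P, where that
   displacement field sits inside the emitted sequence.  Once the
   branch target is known, amd64_write_goto_address patches the field
   with TO - (FROM + SIZE); only SIZE == 4 is handled.  */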
2372
2373 static void
2374 amd64_emit_const (LONGEST num)
2375 {
2376 unsigned char buf[16];
2377 int i;
2378 CORE_ADDR buildaddr = current_insn_ptr;
2379
2380 i = 0;
2381 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2382 memcpy (&buf[i], &num, sizeof (num));
2383 i += 8;
2384 append_insns (&buildaddr, i, buf);
2385 current_insn_ptr = buildaddr;
2386 }
2387
2388 static void
2389 amd64_emit_call (CORE_ADDR fn)
2390 {
2391 unsigned char buf[16];
2392 int i;
2393 CORE_ADDR buildaddr;
2394 LONGEST offset64;
2395
2396   /* The destination function, being in the shared library, may be
2397      more than 31 bits away from the compiled code pad. */
2398
2399 buildaddr = current_insn_ptr;
2400
2401 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2402
2403 i = 0;
2404
2405 if (offset64 > INT_MAX || offset64 < INT_MIN)
2406 {
2407       /* Offset is too large for a near call.  Use an indirect callq
2408          instead; that needs a register, and %rdx is call-clobbered,
2409          so we don't have to push/pop it. */
2410       buf[i++] = 0x48; /* mov $fn,%rdx */
2411       buf[i++] = 0xba;
2412       memcpy (buf + i, &fn, 8);
2413       i += 8;
2414       buf[i++] = 0xff; /* callq *%rdx */
2415       buf[i++] = 0xd2;
2416 }
2417 else
2418 {
2419       int offset32 = offset64; /* we know we can't overflow here. */
           buf[i++] = 0xe8; /* call <reladdr> */
2420       memcpy (buf + i, &offset32, 4);
2421       i += 4;
2422 }
2423
2424 append_insns (&buildaddr, i, buf);
2425 current_insn_ptr = buildaddr;
2426 }
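
/* Roughly, the two sequences emitted above are:

      e8 <rel32>              call fn  (near, +/- 2GB)
      48 ba <imm64> / ff d2   mov $fn,%rdx ; callq *%rdx  */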
2427
2428 static void
2429 amd64_emit_reg (int reg)
2430 {
2431 unsigned char buf[16];
2432 int i;
2433 CORE_ADDR buildaddr;
2434
2435 /* Assume raw_regs is still in %rdi. */
2436 buildaddr = current_insn_ptr;
2437 i = 0;
2438 buf[i++] = 0xbe; /* mov $<n>,%esi */
2439 memcpy (&buf[i], &reg, sizeof (reg));
2440 i += 4;
2441 append_insns (&buildaddr, i, buf);
2442 current_insn_ptr = buildaddr;
2443 amd64_emit_call (get_raw_reg_func_addr ());
2444 }
2445
2446 static void
2447 amd64_emit_pop (void)
2448 {
2449 EMIT_ASM (amd64_pop,
2450 "pop %rax");
2451 }
2452
2453 static void
2454 amd64_emit_stack_flush (void)
2455 {
2456 EMIT_ASM (amd64_stack_flush,
2457 "push %rax");
2458 }
2459
2460 static void
2461 amd64_emit_zero_ext (int arg)
2462 {
2463 switch (arg)
2464 {
2465 case 8:
2466 EMIT_ASM (amd64_zero_ext_8,
2467 "and $0xff,%rax");
2468 break;
2469 case 16:
2470 EMIT_ASM (amd64_zero_ext_16,
2471 "and $0xffff,%rax");
2472 break;
2473 case 32:
2474 EMIT_ASM (amd64_zero_ext_32,
2475 "mov $0xffffffff,%rcx\n\t"
2476 "and %rcx,%rax");
2477 break;
2478 default:
2479 emit_error = 1;
2480 }
2481 }
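
/* The 32-bit case cannot simply use an and-immediate: in 64-bit mode
   an `and $imm32' sign-extends its immediate, so $0xffffffff would
   become all ones and mask nothing.  Hence the mask is materialized
   in %rcx first.  */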
2482
2483 static void
2484 amd64_emit_swap (void)
2485 {
2486 EMIT_ASM (amd64_swap,
2487 "mov %rax,%rcx\n\t"
2488 "pop %rax\n\t"
2489 "push %rcx");
2490 }
2491
2492 static void
2493 amd64_emit_stack_adjust (int n)
2494 {
2495 unsigned char buf[16];
2496 int i;
2497 CORE_ADDR buildaddr = current_insn_ptr;
2498
2499 i = 0;
2500 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2501 buf[i++] = 0x8d;
2502 buf[i++] = 0x64;
2503 buf[i++] = 0x24;
2504   /* This only handles adjustments up to 15 (the limit of the 8-bit displacement), but we don't expect any more. */
2505 buf[i++] = n * 8;
2506 append_insns (&buildaddr, i, buf);
2507 current_insn_ptr = buildaddr;
2508 }
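
/* For instance, a 2-slot adjustment emits 48 8d 64 24 10, i.e.
   lea 0x10(%rsp),%rsp.  */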
2509
2510 /* FN's prototype is `LONGEST(*fn)(int)'. */
2511
2512 static void
2513 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2514 {
2515 unsigned char buf[16];
2516 int i;
2517 CORE_ADDR buildaddr;
2518
2519 buildaddr = current_insn_ptr;
2520 i = 0;
2521 buf[i++] = 0xbf; /* movl $<n>,%edi */
2522 memcpy (&buf[i], &arg1, sizeof (arg1));
2523 i += 4;
2524 append_insns (&buildaddr, i, buf);
2525 current_insn_ptr = buildaddr;
2526 amd64_emit_call (fn);
2527 }
2528
2529 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2530
2531 static void
2532 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2533 {
2534 unsigned char buf[16];
2535 int i;
2536 CORE_ADDR buildaddr;
2537
2538 buildaddr = current_insn_ptr;
2539 i = 0;
2540 buf[i++] = 0xbf; /* movl $<n>,%edi */
2541 memcpy (&buf[i], &arg1, sizeof (arg1));
2542 i += 4;
2543 append_insns (&buildaddr, i, buf);
2544 current_insn_ptr = buildaddr;
2545 EMIT_ASM (amd64_void_call_2_a,
2546 /* Save away a copy of the stack top. */
2547 "push %rax\n\t"
2548 /* Also pass top as the second argument. */
2549 "mov %rax,%rsi");
2550 amd64_emit_call (fn);
2551 EMIT_ASM (amd64_void_call_2_b,
2552            /* Restore the stack top; %rax may have been trashed. */
2553 "pop %rax");
2554 }
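
/* Per the SysV AMD64 calling convention assumed here, ARG1 rides in
   %edi (loaded above) and the saved stack-top value in %rsi, the
   second argument register.  */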
2555
2556 void
2557 amd64_emit_eq_goto (int *offset_p, int *size_p)
2558 {
2559 EMIT_ASM (amd64_eq,
2560 "cmp %rax,(%rsp)\n\t"
2561 "jne .Lamd64_eq_fallthru\n\t"
2562 "lea 0x8(%rsp),%rsp\n\t"
2563 "pop %rax\n\t"
2564 /* jmp, but don't trust the assembler to choose the right jump */
2565 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2566 ".Lamd64_eq_fallthru:\n\t"
2567 "lea 0x8(%rsp),%rsp\n\t"
2568 "pop %rax");
2569
2570 if (offset_p)
2571 *offset_p = 13;
2572 if (size_p)
2573 *size_p = 4;
2574 }
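
/* The 13 counts the bytes in front of the jump displacement:
   cmp %rax,(%rsp) = 4, short jne = 2, lea = 5, pop %rax = 1, plus 1
   for the 0xe9 opcode itself.  */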
2575
2576 void
2577 amd64_emit_ne_goto (int *offset_p, int *size_p)
2578 {
2579 EMIT_ASM (amd64_ne,
2580 "cmp %rax,(%rsp)\n\t"
2581 "je .Lamd64_ne_fallthru\n\t"
2582 "lea 0x8(%rsp),%rsp\n\t"
2583 "pop %rax\n\t"
2584 /* jmp, but don't trust the assembler to choose the right jump */
2585 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2586 ".Lamd64_ne_fallthru:\n\t"
2587 "lea 0x8(%rsp),%rsp\n\t"
2588 "pop %rax");
2589
2590 if (offset_p)
2591 *offset_p = 13;
2592 if (size_p)
2593 *size_p = 4;
2594 }
2595
2596 void
2597 amd64_emit_lt_goto (int *offset_p, int *size_p)
2598 {
2599 EMIT_ASM (amd64_lt,
2600 "cmp %rax,(%rsp)\n\t"
2601 "jnl .Lamd64_lt_fallthru\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2603 "pop %rax\n\t"
2604 /* jmp, but don't trust the assembler to choose the right jump */
2605 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2606 ".Lamd64_lt_fallthru:\n\t"
2607 "lea 0x8(%rsp),%rsp\n\t"
2608 "pop %rax");
2609
2610 if (offset_p)
2611 *offset_p = 13;
2612 if (size_p)
2613 *size_p = 4;
2614 }
2615
2616 void
2617 amd64_emit_le_goto (int *offset_p, int *size_p)
2618 {
2619 EMIT_ASM (amd64_le,
2620 "cmp %rax,(%rsp)\n\t"
2621 "jnle .Lamd64_le_fallthru\n\t"
2622 "lea 0x8(%rsp),%rsp\n\t"
2623 "pop %rax\n\t"
2624 /* jmp, but don't trust the assembler to choose the right jump */
2625 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2626 ".Lamd64_le_fallthru:\n\t"
2627 "lea 0x8(%rsp),%rsp\n\t"
2628 "pop %rax");
2629
2630 if (offset_p)
2631 *offset_p = 13;
2632 if (size_p)
2633 *size_p = 4;
2634 }
2635
2636 void
2637 amd64_emit_gt_goto (int *offset_p, int *size_p)
2638 {
2639 EMIT_ASM (amd64_gt,
2640 "cmp %rax,(%rsp)\n\t"
2641 "jng .Lamd64_gt_fallthru\n\t"
2642 "lea 0x8(%rsp),%rsp\n\t"
2643 "pop %rax\n\t"
2644 /* jmp, but don't trust the assembler to choose the right jump */
2645 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2646 ".Lamd64_gt_fallthru:\n\t"
2647 "lea 0x8(%rsp),%rsp\n\t"
2648 "pop %rax");
2649
2650 if (offset_p)
2651 *offset_p = 13;
2652 if (size_p)
2653 *size_p = 4;
2654 }
2655
2656 void
2657 amd64_emit_ge_goto (int *offset_p, int *size_p)
2658 {
2659 EMIT_ASM (amd64_ge,
2660 "cmp %rax,(%rsp)\n\t"
2661 "jnge .Lamd64_ge_fallthru\n\t"
2662 ".Lamd64_ge_jump:\n\t"
2663 "lea 0x8(%rsp),%rsp\n\t"
2664 "pop %rax\n\t"
2665 /* jmp, but don't trust the assembler to choose the right jump */
2666 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2667 ".Lamd64_ge_fallthru:\n\t"
2668 "lea 0x8(%rsp),%rsp\n\t"
2669 "pop %rax");
2670
2671 if (offset_p)
2672 *offset_p = 13;
2673 if (size_p)
2674 *size_p = 4;
2675 }
2676
2677 struct emit_ops amd64_emit_ops =
2678 {
2679 amd64_emit_prologue,
2680 amd64_emit_epilogue,
2681 amd64_emit_add,
2682 amd64_emit_sub,
2683 amd64_emit_mul,
2684 amd64_emit_lsh,
2685 amd64_emit_rsh_signed,
2686 amd64_emit_rsh_unsigned,
2687 amd64_emit_ext,
2688 amd64_emit_log_not,
2689 amd64_emit_bit_and,
2690 amd64_emit_bit_or,
2691 amd64_emit_bit_xor,
2692 amd64_emit_bit_not,
2693 amd64_emit_equal,
2694 amd64_emit_less_signed,
2695 amd64_emit_less_unsigned,
2696 amd64_emit_ref,
2697 amd64_emit_if_goto,
2698 amd64_emit_goto,
2699 amd64_write_goto_address,
2700 amd64_emit_const,
2701 amd64_emit_call,
2702 amd64_emit_reg,
2703 amd64_emit_pop,
2704 amd64_emit_stack_flush,
2705 amd64_emit_zero_ext,
2706 amd64_emit_swap,
2707 amd64_emit_stack_adjust,
2708 amd64_emit_int_call_1,
2709 amd64_emit_void_call_2,
2710 amd64_emit_eq_goto,
2711 amd64_emit_ne_goto,
2712 amd64_emit_lt_goto,
2713 amd64_emit_le_goto,
2714 amd64_emit_gt_goto,
2715 amd64_emit_ge_goto
2716 };
2717
2718 #endif /* __x86_64__ */
2719
2720 static void
2721 i386_emit_prologue (void)
2722 {
2723 EMIT_ASM32 (i386_prologue,
2724 "push %ebp\n\t"
2725 "mov %esp,%ebp\n\t"
2726 "push %ebx");
2727 /* At this point, the raw regs base address is at 8(%ebp), and the
2728 value pointer is at 12(%ebp). */
2729 }
2730
2731 static void
2732 i386_emit_epilogue (void)
2733 {
2734 EMIT_ASM32 (i386_epilogue,
2735 "mov 12(%ebp),%ecx\n\t"
2736 "mov %eax,(%ecx)\n\t"
2737 "mov %ebx,0x4(%ecx)\n\t"
2738 "xor %eax,%eax\n\t"
2739 "pop %ebx\n\t"
2740 "pop %ebp\n\t"
2741 "ret");
2742 }
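
/* A convention worth noting: in these i386 emitters the 64-bit
   top-of-stack value is split across %eax (low 32 bits) and %ebx
   (high 32 bits), and deeper stack entries occupy eight bytes apiece
   with the low half at the lower address; hence the add/adc pairs and
   the 4(%esp) accesses below.  */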
2743
2744 static void
2745 i386_emit_add (void)
2746 {
2747 EMIT_ASM32 (i386_add,
2748 "add (%esp),%eax\n\t"
2749 "adc 0x4(%esp),%ebx\n\t"
2750 "lea 0x8(%esp),%esp");
2751 }
2752
2753 static void
2754 i386_emit_sub (void)
2755 {
2756 EMIT_ASM32 (i386_sub,
2757 "subl %eax,(%esp)\n\t"
2758 "sbbl %ebx,4(%esp)\n\t"
2759 "pop %eax\n\t"
2760 "pop %ebx\n\t");
2761 }
2762
2763 static void
2764 i386_emit_mul (void)
2765 {
2766 emit_error = 1;
2767 }
2768
2769 static void
2770 i386_emit_lsh (void)
2771 {
2772 emit_error = 1;
2773 }
2774
2775 static void
2776 i386_emit_rsh_signed (void)
2777 {
2778 emit_error = 1;
2779 }
2780
2781 static void
2782 i386_emit_rsh_unsigned (void)
2783 {
2784 emit_error = 1;
2785 }
2786
2787 static void
2788 i386_emit_ext (int arg)
2789 {
2790 switch (arg)
2791 {
2792 case 8:
2793 EMIT_ASM32 (i386_ext_8,
2794 "cbtw\n\t"
2795 "cwtl\n\t"
2796 "movl %eax,%ebx\n\t"
2797 "sarl $31,%ebx");
2798 break;
2799 case 16:
2800 EMIT_ASM32 (i386_ext_16,
2801 "cwtl\n\t"
2802 "movl %eax,%ebx\n\t"
2803 "sarl $31,%ebx");
2804 break;
2805 case 32:
2806 EMIT_ASM32 (i386_ext_32,
2807 "movl %eax,%ebx\n\t"
2808 "sarl $31,%ebx");
2809 break;
2810 default:
2811 emit_error = 1;
2812 }
2813 }
2814
2815 static void
2816 i386_emit_log_not (void)
2817 {
2818 EMIT_ASM32 (i386_log_not,
2819 "or %ebx,%eax\n\t"
2820 "test %eax,%eax\n\t"
2821 "sete %cl\n\t"
2822 "xor %ebx,%ebx\n\t"
2823 "movzbl %cl,%eax");
2824 }
2825
2826 static void
2827 i386_emit_bit_and (void)
2828 {
2829 EMIT_ASM32 (i386_and,
2830 "and (%esp),%eax\n\t"
2831 "and 0x4(%esp),%ebx\n\t"
2832 "lea 0x8(%esp),%esp");
2833 }
2834
2835 static void
2836 i386_emit_bit_or (void)
2837 {
2838 EMIT_ASM32 (i386_or,
2839 "or (%esp),%eax\n\t"
2840 "or 0x4(%esp),%ebx\n\t"
2841 "lea 0x8(%esp),%esp");
2842 }
2843
2844 static void
2845 i386_emit_bit_xor (void)
2846 {
2847 EMIT_ASM32 (i386_xor,
2848 "xor (%esp),%eax\n\t"
2849 "xor 0x4(%esp),%ebx\n\t"
2850 "lea 0x8(%esp),%esp");
2851 }
2852
2853 static void
2854 i386_emit_bit_not (void)
2855 {
2856 EMIT_ASM32 (i386_bit_not,
2857 "xor $0xffffffff,%eax\n\t"
2858 "xor $0xffffffff,%ebx\n\t");
2859 }
2860
2861 static void
2862 i386_emit_equal (void)
2863 {
2864 EMIT_ASM32 (i386_equal,
2865 "cmpl %ebx,4(%esp)\n\t"
2866 "jne .Li386_equal_false\n\t"
2867 "cmpl %eax,(%esp)\n\t"
2868 "je .Li386_equal_true\n\t"
2869 ".Li386_equal_false:\n\t"
2870 "xor %eax,%eax\n\t"
2871 "jmp .Li386_equal_end\n\t"
2872 ".Li386_equal_true:\n\t"
2873 "mov $1,%eax\n\t"
2874 ".Li386_equal_end:\n\t"
2875 "xor %ebx,%ebx\n\t"
2876 "lea 0x8(%esp),%esp");
2877 }
2878
2879 static void
2880 i386_emit_less_signed (void)
2881 {
2882 EMIT_ASM32 (i386_less_signed,
2883 "cmpl %ebx,4(%esp)\n\t"
2884 "jl .Li386_less_signed_true\n\t"
2885 "jne .Li386_less_signed_false\n\t"
2886 "cmpl %eax,(%esp)\n\t"
2887 "jl .Li386_less_signed_true\n\t"
2888 ".Li386_less_signed_false:\n\t"
2889 "xor %eax,%eax\n\t"
2890 "jmp .Li386_less_signed_end\n\t"
2891 ".Li386_less_signed_true:\n\t"
2892 "mov $1,%eax\n\t"
2893 ".Li386_less_signed_end:\n\t"
2894 "xor %ebx,%ebx\n\t"
2895 "lea 0x8(%esp),%esp");
2896 }
2897
2898 static void
2899 i386_emit_less_unsigned (void)
2900 {
2901 EMIT_ASM32 (i386_less_unsigned,
2902 "cmpl %ebx,4(%esp)\n\t"
2903 "jb .Li386_less_unsigned_true\n\t"
2904 "jne .Li386_less_unsigned_false\n\t"
2905 "cmpl %eax,(%esp)\n\t"
2906 "jb .Li386_less_unsigned_true\n\t"
2907 ".Li386_less_unsigned_false:\n\t"
2908 "xor %eax,%eax\n\t"
2909 "jmp .Li386_less_unsigned_end\n\t"
2910 ".Li386_less_unsigned_true:\n\t"
2911 "mov $1,%eax\n\t"
2912 ".Li386_less_unsigned_end:\n\t"
2913 "xor %ebx,%ebx\n\t"
2914 "lea 0x8(%esp),%esp");
2915 }
2916
2917 static void
2918 i386_emit_ref (int size)
2919 {
2920 switch (size)
2921 {
2922 case 1:
2923 EMIT_ASM32 (i386_ref1,
2924 "movb (%eax),%al");
2925 break;
2926 case 2:
2927 EMIT_ASM32 (i386_ref2,
2928 "movw (%eax),%ax");
2929 break;
2930 case 4:
2931 EMIT_ASM32 (i386_ref4,
2932 "movl (%eax),%eax");
2933 break;
2934 case 8:
2935 EMIT_ASM32 (i386_ref8,
2936 "movl 4(%eax),%ebx\n\t"
2937 "movl (%eax),%eax");
2938 break;
2939 }
2940 }
2941
2942 static void
2943 i386_emit_if_goto (int *offset_p, int *size_p)
2944 {
2945 EMIT_ASM32 (i386_if_goto,
2946 "mov %eax,%ecx\n\t"
2947 "or %ebx,%ecx\n\t"
2948 "pop %eax\n\t"
2949 "pop %ebx\n\t"
2950 "cmpl $0,%ecx\n\t"
2951 /* Don't trust the assembler to choose the right jump */
2952 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2953
2954 if (offset_p)
2955 *offset_p = 11; /* be sure that this matches the sequence above */
2956 if (size_p)
2957 *size_p = 4;
2958 }
2959
2960 static void
2961 i386_emit_goto (int *offset_p, int *size_p)
2962 {
2963 EMIT_ASM32 (i386_goto,
2964 /* Don't trust the assembler to choose the right jump */
2965 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2966 if (offset_p)
2967 *offset_p = 1;
2968 if (size_p)
2969 *size_p = 4;
2970 }
2971
2972 static void
2973 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2974 {
2975 int diff = (to - (from + size));
2976 unsigned char buf[sizeof (int)];
2977
2978 /* We're only doing 4-byte sizes at the moment. */
2979 if (size != 4)
2980 {
2981 emit_error = 1;
2982 return;
2983 }
2984
2985 memcpy (buf, &diff, sizeof (int));
2986 write_inferior_memory (from, buf, sizeof (int));
2987 }
2988
2989 static void
2990 i386_emit_const (LONGEST num)
2991 {
2992 unsigned char buf[16];
2993 int i, hi, lo;
2994 CORE_ADDR buildaddr = current_insn_ptr;
2995
2996 i = 0;
2997 buf[i++] = 0xb8; /* mov $<n>,%eax */
2998 lo = num & 0xffffffff;
2999 memcpy (&buf[i], &lo, sizeof (lo));
3000 i += 4;
3001 hi = ((num >> 32) & 0xffffffff);
3002 if (hi)
3003 {
3004 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3005 memcpy (&buf[i], &hi, sizeof (hi));
3006 i += 4;
3007 }
3008 else
3009 {
3010 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3011 }
3012 append_insns (&buildaddr, i, buf);
3013 current_insn_ptr = buildaddr;
3014 }
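
/* E.g. i386_emit_const (0x123456789) emits b8 89 67 45 23
   (mov $0x23456789,%eax) followed by bb 01 00 00 00 (mov $0x1,%ebx).  */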
3015
3016 static void
3017 i386_emit_call (CORE_ADDR fn)
3018 {
3019 unsigned char buf[16];
3020 int i, offset;
3021 CORE_ADDR buildaddr;
3022
3023 buildaddr = current_insn_ptr;
3024 i = 0;
3025 buf[i++] = 0xe8; /* call <reladdr> */
3026 offset = ((int) fn) - (buildaddr + 5);
3027 memcpy (buf + 1, &offset, 4);
3028 append_insns (&buildaddr, 5, buf);
3029 current_insn_ptr = buildaddr;
3030 }
3031
3032 static void
3033 i386_emit_reg (int reg)
3034 {
3035 unsigned char buf[16];
3036 int i;
3037 CORE_ADDR buildaddr;
3038
3039 EMIT_ASM32 (i386_reg_a,
3040 "sub $0x8,%esp");
3041 buildaddr = current_insn_ptr;
3042 i = 0;
3043 buf[i++] = 0xb8; /* mov $<n>,%eax */
3044 memcpy (&buf[i], &reg, sizeof (reg));
3045 i += 4;
3046 append_insns (&buildaddr, i, buf);
3047 current_insn_ptr = buildaddr;
3048 EMIT_ASM32 (i386_reg_b,
3049 "mov %eax,4(%esp)\n\t"
3050 "mov 8(%ebp),%eax\n\t"
3051 "mov %eax,(%esp)");
3052 i386_emit_call (get_raw_reg_func_addr ());
3053 EMIT_ASM32 (i386_reg_c,
3054 "xor %ebx,%ebx\n\t"
3055 "lea 0x8(%esp),%esp");
3056 }
3057
3058 static void
3059 i386_emit_pop (void)
3060 {
3061 EMIT_ASM32 (i386_pop,
3062 "pop %eax\n\t"
3063 "pop %ebx");
3064 }
3065
3066 static void
3067 i386_emit_stack_flush (void)
3068 {
3069 EMIT_ASM32 (i386_stack_flush,
3070 "push %ebx\n\t"
3071 "push %eax");
3072 }
3073
3074 static void
3075 i386_emit_zero_ext (int arg)
3076 {
3077 switch (arg)
3078 {
3079 case 8:
3080 EMIT_ASM32 (i386_zero_ext_8,
3081 "and $0xff,%eax\n\t"
3082 "xor %ebx,%ebx");
3083 break;
3084 case 16:
3085 EMIT_ASM32 (i386_zero_ext_16,
3086 "and $0xffff,%eax\n\t"
3087 "xor %ebx,%ebx");
3088 break;
3089 case 32:
3090 EMIT_ASM32 (i386_zero_ext_32,
3091 "xor %ebx,%ebx");
3092 break;
3093 default:
3094 emit_error = 1;
3095 }
3096 }
3097
3098 static void
3099 i386_emit_swap (void)
3100 {
3101 EMIT_ASM32 (i386_swap,
3102 "mov %eax,%ecx\n\t"
3103 "mov %ebx,%edx\n\t"
3104 "pop %eax\n\t"
3105 "pop %ebx\n\t"
3106 "push %edx\n\t"
3107 "push %ecx");
3108 }
3109
3110 static void
3111 i386_emit_stack_adjust (int n)
3112 {
3113 unsigned char buf[16];
3114 int i;
3115 CORE_ADDR buildaddr = current_insn_ptr;
3116
3117 i = 0;
3118 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3119 buf[i++] = 0x64;
3120 buf[i++] = 0x24;
3121 buf[i++] = n * 8;
3122 append_insns (&buildaddr, i, buf);
3123 current_insn_ptr = buildaddr;
3124 }
3125
3126 /* FN's prototype is `LONGEST(*fn)(int)'. */
3127
3128 static void
3129 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3130 {
3131 unsigned char buf[16];
3132 int i;
3133 CORE_ADDR buildaddr;
3134
3135 EMIT_ASM32 (i386_int_call_1_a,
3136 /* Reserve a bit of stack space. */
3137 "sub $0x8,%esp");
3138 /* Put the one argument on the stack. */
3139 buildaddr = current_insn_ptr;
3140 i = 0;
3141 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3142 buf[i++] = 0x04;
3143 buf[i++] = 0x24;
3144 memcpy (&buf[i], &arg1, sizeof (arg1));
3145 i += 4;
3146 append_insns (&buildaddr, i, buf);
3147 current_insn_ptr = buildaddr;
3148 i386_emit_call (fn);
3149 EMIT_ASM32 (i386_int_call_1_c,
3150 "mov %edx,%ebx\n\t"
3151 "lea 0x8(%esp),%esp");
3152 }
3153
3154 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3155
3156 static void
3157 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3158 {
3159 unsigned char buf[16];
3160 int i;
3161 CORE_ADDR buildaddr;
3162
3163 EMIT_ASM32 (i386_void_call_2_a,
3164 /* Preserve %eax only; we don't have to worry about %ebx. */
3165 "push %eax\n\t"
3166 /* Reserve a bit of stack space for arguments. */
3167 "sub $0x10,%esp\n\t"
3168 /* Copy "top" to the second argument position. (Note that
3169                 we can't assume the function won't scribble on its
3170 arguments, so don't try to restore from this.) */
3171 "mov %eax,4(%esp)\n\t"
3172 "mov %ebx,8(%esp)");
3173 /* Put the first argument on the stack. */
3174 buildaddr = current_insn_ptr;
3175 i = 0;
3176 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3177 buf[i++] = 0x04;
3178 buf[i++] = 0x24;
3179 memcpy (&buf[i], &arg1, sizeof (arg1));
3180 i += 4;
3181 append_insns (&buildaddr, i, buf);
3182 current_insn_ptr = buildaddr;
3183 i386_emit_call (fn);
3184 EMIT_ASM32 (i386_void_call_2_b,
3185 "lea 0x10(%esp),%esp\n\t"
3186 /* Restore original stack top. */
3187 "pop %eax");
3188 }
3189
3190
3191 void
3192 i386_emit_eq_goto (int *offset_p, int *size_p)
3193 {
3194 EMIT_ASM32 (eq,
3195              /* Check low half first, more likely to be the decider */
3196 "cmpl %eax,(%esp)\n\t"
3197 "jne .Leq_fallthru\n\t"
3198 "cmpl %ebx,4(%esp)\n\t"
3199 "jne .Leq_fallthru\n\t"
3200 "lea 0x8(%esp),%esp\n\t"
3201 "pop %eax\n\t"
3202 "pop %ebx\n\t"
3203 /* jmp, but don't trust the assembler to choose the right jump */
3204 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3205 ".Leq_fallthru:\n\t"
3206 "lea 0x8(%esp),%esp\n\t"
3207 "pop %eax\n\t"
3208 "pop %ebx");
3209
3210 if (offset_p)
3211 *offset_p = 18;
3212 if (size_p)
3213 *size_p = 4;
3214 }
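
/* The 18 again counts the bytes in front of the jump displacement:
   two cmpl's (3 + 4), two short jumps (2 + 2), lea (4), two pops
   (1 + 1), plus 1 for the 0xe9 opcode.  The 20s in the lt/le/gt/ge
   variants below account for their extra conditional jump.  */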
3215
3216 void
3217 i386_emit_ne_goto (int *offset_p, int *size_p)
3218 {
3219 EMIT_ASM32 (ne,
3220              /* Check low half first, more likely to be the decider */
3221 "cmpl %eax,(%esp)\n\t"
3222 "jne .Lne_jump\n\t"
3223 "cmpl %ebx,4(%esp)\n\t"
3224 "je .Lne_fallthru\n\t"
3225 ".Lne_jump:\n\t"
3226 "lea 0x8(%esp),%esp\n\t"
3227 "pop %eax\n\t"
3228 "pop %ebx\n\t"
3229 /* jmp, but don't trust the assembler to choose the right jump */
3230 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3231 ".Lne_fallthru:\n\t"
3232 "lea 0x8(%esp),%esp\n\t"
3233 "pop %eax\n\t"
3234 "pop %ebx");
3235
3236 if (offset_p)
3237 *offset_p = 18;
3238 if (size_p)
3239 *size_p = 4;
3240 }
3241
3242 void
3243 i386_emit_lt_goto (int *offset_p, int *size_p)
3244 {
3245 EMIT_ASM32 (lt,
3246 "cmpl %ebx,4(%esp)\n\t"
3247 "jl .Llt_jump\n\t"
3248 "jne .Llt_fallthru\n\t"
3249 "cmpl %eax,(%esp)\n\t"
3250 "jnl .Llt_fallthru\n\t"
3251 ".Llt_jump:\n\t"
3252 "lea 0x8(%esp),%esp\n\t"
3253 "pop %eax\n\t"
3254 "pop %ebx\n\t"
3255 /* jmp, but don't trust the assembler to choose the right jump */
3256 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3257 ".Llt_fallthru:\n\t"
3258 "lea 0x8(%esp),%esp\n\t"
3259 "pop %eax\n\t"
3260 "pop %ebx");
3261
3262 if (offset_p)
3263 *offset_p = 20;
3264 if (size_p)
3265 *size_p = 4;
3266 }
3267
3268 void
3269 i386_emit_le_goto (int *offset_p, int *size_p)
3270 {
3271 EMIT_ASM32 (le,
3272 "cmpl %ebx,4(%esp)\n\t"
3273 "jle .Lle_jump\n\t"
3274 "jne .Lle_fallthru\n\t"
3275 "cmpl %eax,(%esp)\n\t"
3276 "jnle .Lle_fallthru\n\t"
3277 ".Lle_jump:\n\t"
3278 "lea 0x8(%esp),%esp\n\t"
3279 "pop %eax\n\t"
3280 "pop %ebx\n\t"
3281 /* jmp, but don't trust the assembler to choose the right jump */
3282 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3283 ".Lle_fallthru:\n\t"
3284 "lea 0x8(%esp),%esp\n\t"
3285 "pop %eax\n\t"
3286 "pop %ebx");
3287
3288 if (offset_p)
3289 *offset_p = 20;
3290 if (size_p)
3291 *size_p = 4;
3292 }
3293
3294 void
3295 i386_emit_gt_goto (int *offset_p, int *size_p)
3296 {
3297 EMIT_ASM32 (gt,
3298 "cmpl %ebx,4(%esp)\n\t"
3299 "jg .Lgt_jump\n\t"
3300 "jne .Lgt_fallthru\n\t"
3301 "cmpl %eax,(%esp)\n\t"
3302 "jng .Lgt_fallthru\n\t"
3303 ".Lgt_jump:\n\t"
3304 "lea 0x8(%esp),%esp\n\t"
3305 "pop %eax\n\t"
3306 "pop %ebx\n\t"
3307 /* jmp, but don't trust the assembler to choose the right jump */
3308 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3309 ".Lgt_fallthru:\n\t"
3310 "lea 0x8(%esp),%esp\n\t"
3311 "pop %eax\n\t"
3312 "pop %ebx");
3313
3314 if (offset_p)
3315 *offset_p = 20;
3316 if (size_p)
3317 *size_p = 4;
3318 }
3319
3320 void
3321 i386_emit_ge_goto (int *offset_p, int *size_p)
3322 {
3323 EMIT_ASM32 (ge,
3324 "cmpl %ebx,4(%esp)\n\t"
3325 "jge .Lge_jump\n\t"
3326 "jne .Lge_fallthru\n\t"
3327 "cmpl %eax,(%esp)\n\t"
3328 "jnge .Lge_fallthru\n\t"
3329 ".Lge_jump:\n\t"
3330 "lea 0x8(%esp),%esp\n\t"
3331 "pop %eax\n\t"
3332 "pop %ebx\n\t"
3333 /* jmp, but don't trust the assembler to choose the right jump */
3334 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3335 ".Lge_fallthru:\n\t"
3336 "lea 0x8(%esp),%esp\n\t"
3337 "pop %eax\n\t"
3338 "pop %ebx");
3339
3340 if (offset_p)
3341 *offset_p = 20;
3342 if (size_p)
3343 *size_p = 4;
3344 }
3345
3346 struct emit_ops i386_emit_ops =
3347 {
3348 i386_emit_prologue,
3349 i386_emit_epilogue,
3350 i386_emit_add,
3351 i386_emit_sub,
3352 i386_emit_mul,
3353 i386_emit_lsh,
3354 i386_emit_rsh_signed,
3355 i386_emit_rsh_unsigned,
3356 i386_emit_ext,
3357 i386_emit_log_not,
3358 i386_emit_bit_and,
3359 i386_emit_bit_or,
3360 i386_emit_bit_xor,
3361 i386_emit_bit_not,
3362 i386_emit_equal,
3363 i386_emit_less_signed,
3364 i386_emit_less_unsigned,
3365 i386_emit_ref,
3366 i386_emit_if_goto,
3367 i386_emit_goto,
3368 i386_write_goto_address,
3369 i386_emit_const,
3370 i386_emit_call,
3371 i386_emit_reg,
3372 i386_emit_pop,
3373 i386_emit_stack_flush,
3374 i386_emit_zero_ext,
3375 i386_emit_swap,
3376 i386_emit_stack_adjust,
3377 i386_emit_int_call_1,
3378 i386_emit_void_call_2,
3379 i386_emit_eq_goto,
3380 i386_emit_ne_goto,
3381 i386_emit_lt_goto,
3382 i386_emit_le_goto,
3383 i386_emit_gt_goto,
3384 i386_emit_ge_goto
3385 };
3386
3387
3388 static struct emit_ops *
3389 x86_emit_ops (void)
3390 {
3391 #ifdef __x86_64__
3392 if (is_64bit_tdesc ())
3393 return &amd64_emit_ops;
3394 else
3395 #endif
3396 return &i386_emit_ops;
3397 }
3398
3399 static int
3400 x86_supports_range_stepping (void)
3401 {
3402 return 1;
3403 }
3404
3405 /* This is initialized assuming an amd64 target.
3406 x86_arch_setup will correct it for i386 or amd64 targets. */
3407
3408 struct linux_target_ops the_low_target =
3409 {
3410 x86_arch_setup,
3411 x86_linux_regs_info,
3412 x86_cannot_fetch_register,
3413 x86_cannot_store_register,
3414 NULL, /* fetch_register */
3415 x86_get_pc,
3416 x86_set_pc,
3417 x86_breakpoint,
3418 x86_breakpoint_len,
3419 NULL,
3420 1,
3421 x86_breakpoint_at,
3422 x86_supports_z_point_type,
3423 x86_insert_point,
3424 x86_remove_point,
3425 x86_stopped_by_watchpoint,
3426 x86_stopped_data_address,
3427 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3428 native i386 case (no registers smaller than an xfer unit), and are not
3429 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3430 NULL,
3431 NULL,
3432 /* need to fix up i386 siginfo if host is amd64 */
3433 x86_siginfo_fixup,
3434 x86_linux_new_process,
3435 x86_linux_new_thread,
3436 x86_linux_prepare_to_resume,
3437 x86_linux_process_qsupported,
3438 x86_supports_tracepoints,
3439 x86_get_thread_area,
3440 x86_install_fast_tracepoint_jump_pad,
3441 x86_emit_ops,
3442 x86_get_min_fast_tracepoint_insn_len,
3443 x86_supports_range_stepping,
3444 };
3445
3446 void
3447 initialize_low_arch (void)
3448 {
3449 /* Initialize the Linux target descriptions. */
3450 #ifdef __x86_64__
3451 init_registers_amd64_linux ();
3452 init_registers_amd64_avx_linux ();
3453 init_registers_amd64_avx512_linux ();
3454 init_registers_amd64_mpx_linux ();
3455
3456 init_registers_x32_linux ();
3457 init_registers_x32_avx_linux ();
3458 init_registers_x32_avx512_linux ();
3459
3460 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3461 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3462 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3463 #endif
3464 init_registers_i386_linux ();
3465 init_registers_i386_mmx_linux ();
3466 init_registers_i386_avx_linux ();
3467 init_registers_i386_avx512_linux ();
3468 init_registers_i386_mpx_linux ();
3469
3470 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3471 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3472 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3473
3474 initialize_regsets_info (&x86_regsets_info);
3475 }