[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2020 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "gdbsupport/x86-xstate.h"
43 #include <algorithm>
44 #include "target-descriptions.h"
45 #include "arch/amd64.h"
46 #include "producer.h"
47 #include "ax.h"
48 #include "ax-gdb.h"
49 #include "gdbsupport/byte-vector.h"
50 #include "osabi.h"
51 #include "x86-tdep.h"
52
53 /* Note that the AMD64 architecture was previously known as x86-64.
54 The latter is (forever) engraved into the canonical system name as
55 returned by config.guess, and used as the name for the AMD64 port
56 of GNU/Linux. The BSDs have renamed their ports to amd64; they
57 don't like to shout. For GDB we prefer the amd64_-prefix over the
58 x86_64_-prefix since it's so much easier to type. */
59
60 /* Register information. */
61
62 static const char * const amd64_register_names[] =
63 {
64 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
65
66 /* %r8 is indeed register number 8. */
67 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
68 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
69
70 /* %st0 is register number 24. */
71 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
72 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
73
74 /* %xmm0 is register number 40. */
75 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
76 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
77 "mxcsr",
78 };
79
80 static const char * const amd64_ymm_names[] =
81 {
82 "ymm0", "ymm1", "ymm2", "ymm3",
83 "ymm4", "ymm5", "ymm6", "ymm7",
84 "ymm8", "ymm9", "ymm10", "ymm11",
85 "ymm12", "ymm13", "ymm14", "ymm15"
86 };
87
88 static const char * const amd64_ymm_avx512_names[] =
89 {
90 "ymm16", "ymm17", "ymm18", "ymm19",
91 "ymm20", "ymm21", "ymm22", "ymm23",
92 "ymm24", "ymm25", "ymm26", "ymm27",
93 "ymm28", "ymm29", "ymm30", "ymm31"
94 };
95
96 static const char * const amd64_ymmh_names[] =
97 {
98 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
99 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
100 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
101 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
102 };
103
104 static const char * const amd64_ymmh_avx512_names[] =
105 {
106 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
107 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
108 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
109 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
110 };
111
112 static const char * const amd64_mpx_names[] =
113 {
114 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
115 };
116
117 static const char * const amd64_k_names[] =
118 {
119 "k0", "k1", "k2", "k3",
120 "k4", "k5", "k6", "k7"
121 };
122
123 static const char * const amd64_zmmh_names[] =
124 {
125 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
126 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
127 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
128 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
129 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
130 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
131 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
132 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
133 };
134
135 static const char * const amd64_zmm_names[] =
136 {
137 "zmm0", "zmm1", "zmm2", "zmm3",
138 "zmm4", "zmm5", "zmm6", "zmm7",
139 "zmm8", "zmm9", "zmm10", "zmm11",
140 "zmm12", "zmm13", "zmm14", "zmm15",
141 "zmm16", "zmm17", "zmm18", "zmm19",
142 "zmm20", "zmm21", "zmm22", "zmm23",
143 "zmm24", "zmm25", "zmm26", "zmm27",
144 "zmm28", "zmm29", "zmm30", "zmm31"
145 };
146
147 static const char * const amd64_xmm_avx512_names[] = {
148 "xmm16", "xmm17", "xmm18", "xmm19",
149 "xmm20", "xmm21", "xmm22", "xmm23",
150 "xmm24", "xmm25", "xmm26", "xmm27",
151 "xmm28", "xmm29", "xmm30", "xmm31"
152 };
153
154 static const char * const amd64_pkeys_names[] = {
155 "pkru"
156 };
157
158 /* DWARF Register Number Mapping as defined in the System V psABI,
159 section 3.6. */
160
161 static int amd64_dwarf_regmap[] =
162 {
163 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
164 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
165 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
166 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
167
168 /* Frame Pointer Register RBP. */
169 AMD64_RBP_REGNUM,
170
171 /* Stack Pointer Register RSP. */
172 AMD64_RSP_REGNUM,
173
174 /* Extended Integer Registers 8 - 15. */
175 AMD64_R8_REGNUM, /* %r8 */
176 AMD64_R9_REGNUM, /* %r9 */
177 AMD64_R10_REGNUM, /* %r10 */
178 AMD64_R11_REGNUM, /* %r11 */
179 AMD64_R12_REGNUM, /* %r12 */
180 AMD64_R13_REGNUM, /* %r13 */
181 AMD64_R14_REGNUM, /* %r14 */
182 AMD64_R15_REGNUM, /* %r15 */
183
184 /* Return Address RA. Mapped to RIP. */
185 AMD64_RIP_REGNUM,
186
187 /* SSE Registers 0 - 7. */
188 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
189 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
190 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
191 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
192
193 /* Extended SSE Registers 8 - 15. */
194 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
195 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
196 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
197 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
198
199 /* Floating Point Registers 0-7. */
200 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
201 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
202 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
203 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
204
205 /* MMX Registers 0 - 7.
206 We have to handle those registers specifically, as their register
207 number within GDB depends on the target (or they may even not be
208 available at all). */
209 -1, -1, -1, -1, -1, -1, -1, -1,
210
211 /* Control and Status Flags Register. */
212 AMD64_EFLAGS_REGNUM,
213
214 /* Selector Registers. */
215 AMD64_ES_REGNUM,
216 AMD64_CS_REGNUM,
217 AMD64_SS_REGNUM,
218 AMD64_DS_REGNUM,
219 AMD64_FS_REGNUM,
220 AMD64_GS_REGNUM,
221 -1,
222 -1,
223
224 /* Segment Base Address Registers. */
225 -1,
226 -1,
227 -1,
228 -1,
229
230 /* Special Selector Registers. */
231 -1,
232 -1,
233
234 /* Floating Point Control Registers. */
235 AMD64_MXCSR_REGNUM,
236 AMD64_FCTRL_REGNUM,
237 AMD64_FSTAT_REGNUM
238 };
239
240 static const int amd64_dwarf_regmap_len =
241 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
242
243 /* Convert DWARF register number REG to the appropriate register
244 number used by GDB. */
245
246 static int
247 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
248 {
249 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
250 int ymm0_regnum = tdep->ymm0_regnum;
251 int regnum = -1;
252
253 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
254 regnum = amd64_dwarf_regmap[reg];
255
256 if (ymm0_regnum >= 0
257 && i386_xmm_regnum_p (gdbarch, regnum))
258 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
259
260 return regnum;
261 }
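
/* For illustration: DWARF register 17 maps to AMD64_XMM0_REGNUM in the
   table above.  On targets that describe AVX, tdep->ymm0_regnum is valid,
   so the function above rebases that XMM number onto the %ymm pseudo
   registers, and a DWARF location naming %xmm0 resolves to GDB's %ymm0.
   Without AVX, the plain %xmm0 register number is returned unchanged.  */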
262
263 /* Map architectural register numbers to gdb register numbers. */
264
265 static const int amd64_arch_regmap[16] =
266 {
267 AMD64_RAX_REGNUM, /* %rax */
268 AMD64_RCX_REGNUM, /* %rcx */
269 AMD64_RDX_REGNUM, /* %rdx */
270 AMD64_RBX_REGNUM, /* %rbx */
271 AMD64_RSP_REGNUM, /* %rsp */
272 AMD64_RBP_REGNUM, /* %rbp */
273 AMD64_RSI_REGNUM, /* %rsi */
274 AMD64_RDI_REGNUM, /* %rdi */
275 AMD64_R8_REGNUM, /* %r8 */
276 AMD64_R9_REGNUM, /* %r9 */
277 AMD64_R10_REGNUM, /* %r10 */
278 AMD64_R11_REGNUM, /* %r11 */
279 AMD64_R12_REGNUM, /* %r12 */
280 AMD64_R13_REGNUM, /* %r13 */
281 AMD64_R14_REGNUM, /* %r14 */
282 AMD64_R15_REGNUM /* %r15 */
283 };
284
285 static const int amd64_arch_regmap_len =
286 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
287
288 /* Convert architectural register number REG to the appropriate register
289 number used by GDB. */
290
291 static int
292 amd64_arch_reg_to_regnum (int reg)
293 {
294 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
295
296 return amd64_arch_regmap[reg];
297 }
298
299 /* Register names for byte pseudo-registers. */
300
301 static const char * const amd64_byte_names[] =
302 {
303 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
304 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
305 "ah", "bh", "ch", "dh"
306 };
307
308 /* Number of lower byte registers. */
309 #define AMD64_NUM_LOWER_BYTE_REGS 16
310
311 /* Register names for word pseudo-registers. */
312
313 static const char * const amd64_word_names[] =
314 {
315 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
316 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
317 };
318
319 /* Register names for dword pseudo-registers. */
320
321 static const char * const amd64_dword_names[] =
322 {
323 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
324 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
325 "eip"
326 };
327
328 /* Return the name of register REGNUM. */
329
330 static const char *
331 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
332 {
333 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
334 if (i386_byte_regnum_p (gdbarch, regnum))
335 return amd64_byte_names[regnum - tdep->al_regnum];
336 else if (i386_zmm_regnum_p (gdbarch, regnum))
337 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
338 else if (i386_ymm_regnum_p (gdbarch, regnum))
339 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
340 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
341 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
342 else if (i386_word_regnum_p (gdbarch, regnum))
343 return amd64_word_names[regnum - tdep->ax_regnum];
344 else if (i386_dword_regnum_p (gdbarch, regnum))
345 return amd64_dword_names[regnum - tdep->eax_regnum];
346 else
347 return i386_pseudo_register_name (gdbarch, regnum);
348 }
349
350 static struct value *
351 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
352 readable_regcache *regcache,
353 int regnum)
354 {
355 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
356
357 value *result_value = allocate_value (register_type (gdbarch, regnum));
358 VALUE_LVAL (result_value) = lval_register;
359 VALUE_REGNUM (result_value) = regnum;
360 gdb_byte *buf = value_contents_raw (result_value);
361
362 if (i386_byte_regnum_p (gdbarch, regnum))
363 {
364 int gpnum = regnum - tdep->al_regnum;
365
366 /* Extract (always little endian). */
367 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
368 {
369 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
370 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
371
372 /* Special handling for AH, BH, CH, DH. */
373 register_status status = regcache->raw_read (gpnum, raw_buf);
374 if (status == REG_VALID)
375 memcpy (buf, raw_buf + 1, 1);
376 else
377 mark_value_bytes_unavailable (result_value, 0,
378 TYPE_LENGTH (value_type (result_value)));
379 }
380 else
381 {
382 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
383 register_status status = regcache->raw_read (gpnum, raw_buf);
384 if (status == REG_VALID)
385 memcpy (buf, raw_buf, 1);
386 else
387 mark_value_bytes_unavailable (result_value, 0,
388 TYPE_LENGTH (value_type (result_value)));
389 }
390 }
391 else if (i386_dword_regnum_p (gdbarch, regnum))
392 {
393 int gpnum = regnum - tdep->eax_regnum;
394 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
395 /* Extract (always little endian). */
396 register_status status = regcache->raw_read (gpnum, raw_buf);
397 if (status == REG_VALID)
398 memcpy (buf, raw_buf, 4);
399 else
400 mark_value_bytes_unavailable (result_value, 0,
401 TYPE_LENGTH (value_type (result_value)));
402 }
403 else
404 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
405 result_value);
406
407 return result_value;
408 }
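
/* For illustration: reading the pseudo register "eax" takes the dword
   branch above; raw %rax is read into raw_buf and its low four bytes are
   copied into the value.  Reading "ah" (an upper-byte register, so
   gpnum >= AMD64_NUM_LOWER_BYTE_REGS) re-targets gpnum at %rax and copies
   byte 1 of the raw contents instead of byte 0.  */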
409
410 static void
411 amd64_pseudo_register_write (struct gdbarch *gdbarch,
412 struct regcache *regcache,
413 int regnum, const gdb_byte *buf)
414 {
415 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
416
417 if (i386_byte_regnum_p (gdbarch, regnum))
418 {
419 int gpnum = regnum - tdep->al_regnum;
420
421 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
422 {
423 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
424 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
425
426 /* Read ... AH, BH, CH, DH. */
427 regcache->raw_read (gpnum, raw_buf);
428 /* ... Modify ... (always little endian). */
429 memcpy (raw_buf + 1, buf, 1);
430 /* ... Write. */
431 regcache->raw_write (gpnum, raw_buf);
432 }
433 else
434 {
435 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
436
437 /* Read ... */
438 regcache->raw_read (gpnum, raw_buf);
439 /* ... Modify ... (always little endian). */
440 memcpy (raw_buf, buf, 1);
441 /* ... Write. */
442 regcache->raw_write (gpnum, raw_buf);
443 }
444 }
445 else if (i386_dword_regnum_p (gdbarch, regnum))
446 {
447 int gpnum = regnum - tdep->eax_regnum;
448 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
449
450 /* Read ... */
451 regcache->raw_read (gpnum, raw_buf);
452 /* ... Modify ... (always little endian). */
453 memcpy (raw_buf, buf, 4);
454 /* ... Write. */
455 regcache->raw_write (gpnum, raw_buf);
456 }
457 else
458 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
459 }
460
461 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
462
463 static int
464 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
465 struct agent_expr *ax, int regnum)
466 {
467 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
468
469 if (i386_byte_regnum_p (gdbarch, regnum))
470 {
471 int gpnum = regnum - tdep->al_regnum;
472
473 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
474 ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
475 else
476 ax_reg_mask (ax, gpnum);
477 return 0;
478 }
479 else if (i386_dword_regnum_p (gdbarch, regnum))
480 {
481 int gpnum = regnum - tdep->eax_regnum;
482
483 ax_reg_mask (ax, gpnum);
484 return 0;
485 }
486 else
487 return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
488 }
489
490 \f
491
492 /* Register classes as defined in the psABI. */
493
494 enum amd64_reg_class
495 {
496 AMD64_INTEGER,
497 AMD64_SSE,
498 AMD64_SSEUP,
499 AMD64_X87,
500 AMD64_X87UP,
501 AMD64_COMPLEX_X87,
502 AMD64_NO_CLASS,
503 AMD64_MEMORY
504 };
505
506 /* Return the union class of CLASS1 and CLASS2. See the psABI for
507 details. */
508
509 static enum amd64_reg_class
510 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
511 {
512 /* Rule (a): If both classes are equal, this is the resulting class. */
513 if (class1 == class2)
514 return class1;
515
516 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
517 is the other class. */
518 if (class1 == AMD64_NO_CLASS)
519 return class2;
520 if (class2 == AMD64_NO_CLASS)
521 return class1;
522
523 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
524 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
525 return AMD64_MEMORY;
526
527 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
528 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
529 return AMD64_INTEGER;
530
531 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
532 MEMORY is used as class. */
533 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
534 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
535 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
536 return AMD64_MEMORY;
537
538 /* Rule (f): Otherwise class SSE is used. */
539 return AMD64_SSE;
540 }
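
/* For illustration: merging AMD64_NO_CLASS with AMD64_SSE yields AMD64_SSE
   (rule b), merging AMD64_INTEGER with AMD64_SSE yields AMD64_INTEGER
   (rule d), and merging AMD64_SSE with AMD64_X87 yields AMD64_MEMORY
   (rule e).  */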
541
542 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
543
544 /* Return true if TYPE is a structure or union with unaligned fields. */
545
546 static bool
547 amd64_has_unaligned_fields (struct type *type)
548 {
549 if (type->code () == TYPE_CODE_STRUCT
550 || type->code () == TYPE_CODE_UNION)
551 {
552 for (int i = 0; i < type->num_fields (); i++)
553 {
554 struct type *subtype = check_typedef (type->field (i).type ());
555 int bitpos = TYPE_FIELD_BITPOS (type, i);
556 int align = type_align (subtype);
557
558 /* Ignore static fields, empty fields (for example nested
559 empty structures), and bitfields (these are handled by
560 the caller). */
561 if (field_is_static (&type->field (i))
562 || (TYPE_FIELD_BITSIZE (type, i) == 0
563 && TYPE_LENGTH (subtype) == 0)
564 || TYPE_FIELD_PACKED (type, i))
565 continue;
566
567 if (bitpos % 8 != 0)
568 return true;
569
570 int bytepos = bitpos / 8;
571 if (bytepos % align != 0)
572 return true;
573
574 if (amd64_has_unaligned_fields (subtype))
575 return true;
576 }
577 }
578
579 return false;
580 }
581
582 /* Classify field I of TYPE starting at BITOFFSET according to the rules for
583 structures and union types, and store the result in THECLASS. */
584
585 static void
586 amd64_classify_aggregate_field (struct type *type, int i,
587 enum amd64_reg_class theclass[2],
588 unsigned int bitoffset)
589 {
590 struct type *subtype = check_typedef (type->field (i).type ());
591 int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i);
592 int pos = bitpos / 64;
593 enum amd64_reg_class subclass[2];
594 int bitsize = TYPE_FIELD_BITSIZE (type, i);
595 int endpos;
596
597 if (bitsize == 0)
598 bitsize = TYPE_LENGTH (subtype) * 8;
599 endpos = (bitpos + bitsize - 1) / 64;
600
601 /* Ignore static fields, or empty fields, for example nested
602 empty structures. */
603 if (field_is_static (&type->field (i)) || bitsize == 0)
604 return;
605
606 if (subtype->code () == TYPE_CODE_STRUCT
607 || subtype->code () == TYPE_CODE_UNION)
608 {
609 /* Each field of an object is classified recursively. */
610 int j;
611 for (j = 0; j < subtype->num_fields (); j++)
612 amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
613 return;
614 }
615
616 gdb_assert (pos == 0 || pos == 1);
617
618 amd64_classify (subtype, subclass);
619 theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
620 if (bitsize <= 64 && pos == 0 && endpos == 1)
621 /* This is a bit of an odd case: We have a field that would
622 normally fit in one of the two eightbytes, except that
623 it is placed in a way that this field straddles them.
624 This has been seen with a structure containing an array.
625
626 The ABI is a bit unclear in this case, but we assume that
627 this field's class (stored in subclass[0]) must also be merged
628 into class[1]. In other words, our field has a piece stored
629 in the second eight-byte, and thus its class applies to
630 the second eight-byte as well.
631
632 In the case where the field length exceeds 8 bytes,
633 it should not be necessary to merge the field class
634 into class[1]. As LEN > 8, subclass[1] is necessarily
635 different from AMD64_NO_CLASS. If subclass[1] is equal
636 to subclass[0], then the normal class[1]/subclass[1]
637 merging will take care of everything. For subclass[1]
638 to be different from subclass[0], I can only see the case
639 where we have a SSE/SSEUP or X87/X87UP pair, which both
640 use up all 16 bytes of the aggregate, and are already
641 handled just fine (because each portion sits on its own
642 8-byte). */
643 theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
644 if (pos == 0)
645 theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
646 }
647
648 /* Classify TYPE according to the rules for aggregate (structures and
649 arrays) and union types, and store the result in CLASS. */
650
651 static void
652 amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
653 {
654 /* 1. If the size of an object is larger than two eightbytes, or it has
655 unaligned fields, it has class memory. */
656 if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
657 {
658 theclass[0] = theclass[1] = AMD64_MEMORY;
659 return;
660 }
661
662 /* 2. Both eightbytes get initialized to class NO_CLASS. */
663 theclass[0] = theclass[1] = AMD64_NO_CLASS;
664
665 /* 3. Each field of an object is classified recursively so that
666 always two fields are considered. The resulting class is
667 calculated according to the classes of the fields in the
668 eightbyte: */
669
670 if (type->code () == TYPE_CODE_ARRAY)
671 {
672 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
673
674 /* All fields in an array have the same type. */
675 amd64_classify (subtype, theclass);
676 if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
677 theclass[1] = theclass[0];
678 }
679 else
680 {
681 int i;
682
683 /* Structure or union. */
684 gdb_assert (type->code () == TYPE_CODE_STRUCT
685 || type->code () == TYPE_CODE_UNION);
686
687 for (i = 0; i < type->num_fields (); i++)
688 amd64_classify_aggregate_field (type, i, theclass, 0);
689 }
690
691 /* 4. Then a post merger cleanup is done: */
692
693 /* Rule (a): If one of the classes is MEMORY, the whole argument is
694 passed in memory. */
695 if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
696 theclass[0] = theclass[1] = AMD64_MEMORY;
697
698 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
699 SSE. */
700 if (theclass[0] == AMD64_SSEUP)
701 theclass[0] = AMD64_SSE;
702 if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
703 theclass[1] = AMD64_SSE;
704 }
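
/* For illustration: struct { long l; double d; } is 16 bytes with no
   unaligned fields; its first eightbyte classifies as INTEGER and its
   second as SSE.  An array such as int[4] classifies its element type
   once (INTEGER) and, being larger than 8 bytes, copies that class to
   the second eightbyte, giving {INTEGER, INTEGER}.  A 24-byte struct
   exceeds two eightbytes and is classified {MEMORY, MEMORY} outright.  */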
705
706 /* Classify TYPE, and store the result in CLASS. */
707
708 static void
709 amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
710 {
711 enum type_code code = type->code ();
712 int len = TYPE_LENGTH (type);
713
714 theclass[0] = theclass[1] = AMD64_NO_CLASS;
715
716 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
717 long, long long, and pointers are in the INTEGER class. Similarly,
718 range types, used by languages such as Ada, are also in the INTEGER
719 class. */
720 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
721 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
722 || code == TYPE_CODE_CHAR
723 || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
724 && (len == 1 || len == 2 || len == 4 || len == 8))
725 theclass[0] = AMD64_INTEGER;
726
727 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
728 are in class SSE. */
729 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
730 && (len == 4 || len == 8))
731 /* FIXME: __m64 . */
732 theclass[0] = AMD64_SSE;
733
734 /* Arguments of types __float128, _Decimal128 and __m128 are split into
735 two halves. The least significant ones belong to class SSE, the most
736 significant one to class SSEUP. */
737 else if (code == TYPE_CODE_DECFLOAT && len == 16)
738 /* FIXME: __float128, __m128. */
739 theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
740
741 /* The 64-bit mantissa of arguments of type long double belongs to
742 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
743 class X87UP. */
744 else if (code == TYPE_CODE_FLT && len == 16)
745 /* Class X87 and X87UP. */
746 theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
747
748 /* Arguments of complex T where T is one of the types float or
749 double get treated as if they are implemented as:
750
751 struct complexT {
752 T real;
753 T imag;
754 };
755
756 */
757 else if (code == TYPE_CODE_COMPLEX && len == 8)
758 theclass[0] = AMD64_SSE;
759 else if (code == TYPE_CODE_COMPLEX && len == 16)
760 theclass[0] = theclass[1] = AMD64_SSE;
761
762 /* A variable of type complex long double is classified as type
763 COMPLEX_X87. */
764 else if (code == TYPE_CODE_COMPLEX && len == 32)
765 theclass[0] = AMD64_COMPLEX_X87;
766
767 /* Aggregates. */
768 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
769 || code == TYPE_CODE_UNION)
770 amd64_classify_aggregate (type, theclass);
771 }
772
773 static enum return_value_convention
774 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
775 struct type *type, struct regcache *regcache,
776 gdb_byte *readbuf, const gdb_byte *writebuf)
777 {
778 enum amd64_reg_class theclass[2];
779 int len = TYPE_LENGTH (type);
780 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
781 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
782 int integer_reg = 0;
783 int sse_reg = 0;
784 int i;
785
786 gdb_assert (!(readbuf && writebuf));
787
788 /* 1. Classify the return type with the classification algorithm. */
789 amd64_classify (type, theclass);
790
791 /* 2. If the type has class MEMORY, then the caller provides space
792 for the return value and passes the address of this storage in
793 %rdi as if it were the first argument to the function. In effect,
794 this address becomes a hidden first argument.
795
796 On return %rax will contain the address that has been passed in
797 by the caller in %rdi. */
798 if (theclass[0] == AMD64_MEMORY)
799 {
800 /* As indicated by the comment above, the ABI guarantees that we
801 can always find the return value just after the function has
802 returned. */
803
804 if (readbuf)
805 {
806 ULONGEST addr;
807
808 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
809 read_memory (addr, readbuf, TYPE_LENGTH (type));
810 }
811
812 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
813 }
814
815 /* 8. If the class is COMPLEX_X87, the real part of the value is
816 returned in %st0 and the imaginary part in %st1. */
817 if (theclass[0] == AMD64_COMPLEX_X87)
818 {
819 if (readbuf)
820 {
821 regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
822 regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
823 }
824
825 if (writebuf)
826 {
827 i387_return_value (gdbarch, regcache);
828 regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
829 regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);
830
831 /* Fix up the tag word such that both %st(0) and %st(1) are
832 marked as valid. */
833 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
834 }
835
836 return RETURN_VALUE_REGISTER_CONVENTION;
837 }
838
839 gdb_assert (theclass[1] != AMD64_MEMORY);
840 gdb_assert (len <= 16);
841
842 for (i = 0; len > 0; i++, len -= 8)
843 {
844 int regnum = -1;
845 int offset = 0;
846
847 switch (theclass[i])
848 {
849 case AMD64_INTEGER:
850 /* 3. If the class is INTEGER, the next available register
851 of the sequence %rax, %rdx is used. */
852 regnum = integer_regnum[integer_reg++];
853 break;
854
855 case AMD64_SSE:
856 /* 4. If the class is SSE, the next available SSE register
857 of the sequence %xmm0, %xmm1 is used. */
858 regnum = sse_regnum[sse_reg++];
859 break;
860
861 case AMD64_SSEUP:
862 /* 5. If the class is SSEUP, the eightbyte is passed in the
863 upper half of the last used SSE register. */
864 gdb_assert (sse_reg > 0);
865 regnum = sse_regnum[sse_reg - 1];
866 offset = 8;
867 break;
868
869 case AMD64_X87:
870 /* 6. If the class is X87, the value is returned on the X87
871 stack in %st0 as 80-bit x87 number. */
872 regnum = AMD64_ST0_REGNUM;
873 if (writebuf)
874 i387_return_value (gdbarch, regcache);
875 break;
876
877 case AMD64_X87UP:
878 /* 7. If the class is X87UP, the value is returned together
879 with the previous X87 value in %st0. */
880 gdb_assert (i > 0 && theclass[0] == AMD64_X87);
881 regnum = AMD64_ST0_REGNUM;
882 offset = 8;
883 len = 2;
884 break;
885
886 case AMD64_NO_CLASS:
887 continue;
888
889 default:
890 gdb_assert (!"Unexpected register class.");
891 }
892
893 gdb_assert (regnum != -1);
894
895 if (readbuf)
896 regcache->raw_read_part (regnum, offset, std::min (len, 8),
897 readbuf + i * 8);
898 if (writebuf)
899 regcache->raw_write_part (regnum, offset, std::min (len, 8),
900 writebuf + i * 8);
901 }
902
903 return RETURN_VALUE_REGISTER_CONVENTION;
904 }
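
/* For illustration: returning struct { long l; double d; } classifies as
   {INTEGER, SSE}, so the first eightbyte is read from (or written to)
   %rax and the second from %xmm0.  A 32-byte struct classifies as MEMORY:
   the caller passed a buffer address in %rdi, the callee returns that
   address in %rax, and GDB reads the value from that memory.  */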
905 \f
906
907 static CORE_ADDR
908 amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
909 CORE_ADDR sp, function_call_return_method return_method)
910 {
911 static int integer_regnum[] =
912 {
913 AMD64_RDI_REGNUM, /* %rdi */
914 AMD64_RSI_REGNUM, /* %rsi */
915 AMD64_RDX_REGNUM, /* %rdx */
916 AMD64_RCX_REGNUM, /* %rcx */
917 AMD64_R8_REGNUM, /* %r8 */
918 AMD64_R9_REGNUM /* %r9 */
919 };
920 static int sse_regnum[] =
921 {
922 /* %xmm0 ... %xmm7 */
923 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
924 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
925 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
926 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
927 };
928 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
929 int num_stack_args = 0;
930 int num_elements = 0;
931 int element = 0;
932 int integer_reg = 0;
933 int sse_reg = 0;
934 int i;
935
936 /* Reserve a register for the "hidden" argument. */
937 if (return_method == return_method_struct)
938 integer_reg++;
939
940 for (i = 0; i < nargs; i++)
941 {
942 struct type *type = value_type (args[i]);
943 int len = TYPE_LENGTH (type);
944 enum amd64_reg_class theclass[2];
945 int needed_integer_regs = 0;
946 int needed_sse_regs = 0;
947 int j;
948
949 /* Classify argument. */
950 amd64_classify (type, theclass);
951
952 /* Calculate the number of integer and SSE registers needed for
953 this argument. */
954 for (j = 0; j < 2; j++)
955 {
956 if (theclass[j] == AMD64_INTEGER)
957 needed_integer_regs++;
958 else if (theclass[j] == AMD64_SSE)
959 needed_sse_regs++;
960 }
961
962 /* Check whether enough registers are available, and if the
963 argument should be passed in registers at all. */
964 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
965 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
966 || (needed_integer_regs == 0 && needed_sse_regs == 0))
967 {
968 /* The argument will be passed on the stack. */
969 num_elements += ((len + 7) / 8);
970 stack_args[num_stack_args++] = args[i];
971 }
972 else
973 {
974 /* The argument will be passed in registers. */
975 const gdb_byte *valbuf = value_contents (args[i]);
976 gdb_byte buf[8];
977
978 gdb_assert (len <= 16);
979
980 for (j = 0; len > 0; j++, len -= 8)
981 {
982 int regnum = -1;
983 int offset = 0;
984
985 switch (theclass[j])
986 {
987 case AMD64_INTEGER:
988 regnum = integer_regnum[integer_reg++];
989 break;
990
991 case AMD64_SSE:
992 regnum = sse_regnum[sse_reg++];
993 break;
994
995 case AMD64_SSEUP:
996 gdb_assert (sse_reg > 0);
997 regnum = sse_regnum[sse_reg - 1];
998 offset = 8;
999 break;
1000
1001 case AMD64_NO_CLASS:
1002 continue;
1003
1004 default:
1005 gdb_assert (!"Unexpected register class.");
1006 }
1007
1008 gdb_assert (regnum != -1);
1009 memset (buf, 0, sizeof buf);
1010 memcpy (buf, valbuf + j * 8, std::min (len, 8));
1011 regcache->raw_write_part (regnum, offset, 8, buf);
1012 }
1013 }
1014 }
1015
1016 /* Allocate space for the arguments on the stack. */
1017 sp -= num_elements * 8;
1018
1019 /* The psABI says that "The end of the input argument area shall be
1020 aligned on a 16 byte boundary." */
1021 sp &= ~0xf;
1022
1023 /* Write out the arguments to the stack. */
1024 for (i = 0; i < num_stack_args; i++)
1025 {
1026 struct type *type = value_type (stack_args[i]);
1027 const gdb_byte *valbuf = value_contents (stack_args[i]);
1028 int len = TYPE_LENGTH (type);
1029
1030 write_memory (sp + element * 8, valbuf, len);
1031 element += ((len + 7) / 8);
1032 }
1033
1034 /* The psABI says that "For calls that may call functions that use
1035 varargs or stdargs (prototype-less calls or calls to functions
1036 containing ellipsis (...) in the declaration) %al is used as
1037 hidden argument to specify the number of SSE registers used". */
1038 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
1039 return sp;
1040 }
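
/* For illustration: for a call such as f (1, 2.5, s) where s is
   struct { long l; double d; }, the classification above places 1 in
   %rdi, 2.5 in %xmm0, s.l in %rsi and s.d in %xmm1, and finally records
   the SSE-register count (2) in %rax for the benefit of varargs callees.
   A 32-byte struct would instead be copied onto the stack, occupying
   four eightbyte slots in the 16-byte-aligned argument area.  */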
1041
1042 static CORE_ADDR
1043 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1044 struct regcache *regcache, CORE_ADDR bp_addr,
1045 int nargs, struct value **args, CORE_ADDR sp,
1046 function_call_return_method return_method,
1047 CORE_ADDR struct_addr)
1048 {
1049 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1050 gdb_byte buf[8];
1051
1052 /* BND registers can be in arbitrary values at the moment of the
1053 inferior call. This can cause boundary violations that are not
1054 due to a real bug or even desired by the user. The best thing to do
1055 is to set the BND registers to the INIT state, which allows access to
1056 the whole memory, before pushing the inferior call. */
1057 i387_reset_bnd_regs (gdbarch, regcache);
1058
1059 /* Pass arguments. */
1060 sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);
1061
1062 /* Pass "hidden" argument. */
1063 if (return_method == return_method_struct)
1064 {
1065 store_unsigned_integer (buf, 8, byte_order, struct_addr);
1066 regcache->cooked_write (AMD64_RDI_REGNUM, buf);
1067 }
1068
1069 /* Store return address. */
1070 sp -= 8;
1071 store_unsigned_integer (buf, 8, byte_order, bp_addr);
1072 write_memory (sp, buf, 8);
1073
1074 /* Finally, update the stack pointer... */
1075 store_unsigned_integer (buf, 8, byte_order, sp);
1076 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
1077
1078 /* ...and fake a frame pointer. */
1079 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
1080
1081 return sp + 16;
1082 }
1083 \f
1084 /* Displaced instruction handling. */
1085
1086 /* A partially decoded instruction.
1087 This contains enough details for displaced stepping purposes. */
1088
1089 struct amd64_insn
1090 {
1091 /* The number of opcode bytes. */
1092 int opcode_len;
1093 /* The offset of the REX/VEX instruction encoding prefix or -1 if
1094 not present. */
1095 int enc_prefix_offset;
1096 /* The offset to the first opcode byte. */
1097 int opcode_offset;
1098 /* The offset to the modrm byte or -1 if not present. */
1099 int modrm_offset;
1100
1101 /* The raw instruction. */
1102 gdb_byte *raw_insn;
1103 };
1104
1105 struct amd64_displaced_step_closure : public displaced_step_closure
1106 {
1107 amd64_displaced_step_closure (int insn_buf_len)
1108 : insn_buf (insn_buf_len, 0)
1109 {}
1110
1111 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1112 int tmp_used = 0;
1113 int tmp_regno;
1114 ULONGEST tmp_save;
1115
1116 /* Details of the instruction. */
1117 struct amd64_insn insn_details;
1118
1119 /* The possibly modified insn. */
1120 gdb::byte_vector insn_buf;
1121 };
1122
1123 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1124 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1125 at which point delete these in favor of libopcodes' versions). */
1126
1127 static const unsigned char onebyte_has_modrm[256] = {
1128 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1129 /* ------------------------------- */
1130 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1131 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1132 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1133 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1134 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1135 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1136 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1137 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1138 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1139 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1140 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1141 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1142 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1143 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1144 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1145 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1146 /* ------------------------------- */
1147 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1148 };
1149
1150 static const unsigned char twobyte_has_modrm[256] = {
1151 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1152 /* ------------------------------- */
1153 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1154 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1155 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1156 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1157 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1158 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1159 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1160 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1161 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1162 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1163 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1164 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1165 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1166 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1167 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1168 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1169 /* ------------------------------- */
1170 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1171 };
1172
1173 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1174
1175 static int
1176 rex_prefix_p (gdb_byte pfx)
1177 {
1178 return REX_PREFIX_P (pfx);
1179 }
1180
1181 /* True if PFX is the start of the 2-byte VEX prefix. */
1182
1183 static bool
1184 vex2_prefix_p (gdb_byte pfx)
1185 {
1186 return pfx == 0xc5;
1187 }
1188
1189 /* True if PFX is the start of the 3-byte VEX prefix. */
1190
1191 static bool
1192 vex3_prefix_p (gdb_byte pfx)
1193 {
1194 return pfx == 0xc4;
1195 }
1196
1197 /* Skip the legacy instruction prefixes in INSN.
1198 We assume INSN is properly sentineled so we don't have to worry
1199 about falling off the end of the buffer. */
1200
1201 static gdb_byte *
1202 amd64_skip_prefixes (gdb_byte *insn)
1203 {
1204 while (1)
1205 {
1206 switch (*insn)
1207 {
1208 case DATA_PREFIX_OPCODE:
1209 case ADDR_PREFIX_OPCODE:
1210 case CS_PREFIX_OPCODE:
1211 case DS_PREFIX_OPCODE:
1212 case ES_PREFIX_OPCODE:
1213 case FS_PREFIX_OPCODE:
1214 case GS_PREFIX_OPCODE:
1215 case SS_PREFIX_OPCODE:
1216 case LOCK_PREFIX_OPCODE:
1217 case REPE_PREFIX_OPCODE:
1218 case REPNE_PREFIX_OPCODE:
1219 ++insn;
1220 continue;
1221 default:
1222 break;
1223 }
1224 break;
1225 }
1226
1227 return insn;
1228 }
1229
1230 /* Return an integer register (other than RSP) that is unused as an input
1231 operand in INSN.
1232 In order to not require adding a rex prefix if the insn doesn't already
1233 have one, the result is restricted to RAX ... RDI, sans RSP.
1234 The register numbering of the result follows architecture ordering,
1235 e.g. RDI = 7. */
1236
1237 static int
1238 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1239 {
1240 /* 1 bit for each reg */
1241 int used_regs_mask = 0;
1242
1243 /* There can be at most 3 int regs used as inputs in an insn, and we have
1244 7 to choose from (RAX ... RDI, sans RSP).
1245 This allows us to take a conservative approach and keep things simple.
1246 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1247 that implicitly specify RAX. */
1248
1249 /* Avoid RAX. */
1250 used_regs_mask |= 1 << EAX_REG_NUM;
1251 /* Similarly avoid RDX, implicit operand in divides. */
1252 used_regs_mask |= 1 << EDX_REG_NUM;
1253 /* Avoid RSP. */
1254 used_regs_mask |= 1 << ESP_REG_NUM;
1255
1256 /* If the opcode is one byte long and there's no ModRM byte,
1257 assume the opcode specifies a register. */
1258 if (details->opcode_len == 1 && details->modrm_offset == -1)
1259 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1260
1261 /* Mark used regs in the modrm/sib bytes. */
1262 if (details->modrm_offset != -1)
1263 {
1264 int modrm = details->raw_insn[details->modrm_offset];
1265 int mod = MODRM_MOD_FIELD (modrm);
1266 int reg = MODRM_REG_FIELD (modrm);
1267 int rm = MODRM_RM_FIELD (modrm);
1268 int have_sib = mod != 3 && rm == 4;
1269
1270 /* Assume the reg field of the modrm byte specifies a register. */
1271 used_regs_mask |= 1 << reg;
1272
1273 if (have_sib)
1274 {
1275 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1276 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1277 used_regs_mask |= 1 << base;
1278 used_regs_mask |= 1 << idx;
1279 }
1280 else
1281 {
1282 used_regs_mask |= 1 << rm;
1283 }
1284 }
1285
1286 gdb_assert (used_regs_mask < 256);
1287 gdb_assert (used_regs_mask != 255);
1288
1289 /* Finally, find a free reg. */
1290 {
1291 int i;
1292
1293 for (i = 0; i < 8; ++i)
1294 {
1295 if (! (used_regs_mask & (1 << i)))
1296 return i;
1297 }
1298
1299 /* We shouldn't get here. */
1300 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1301 }
1302 }
1303
1304 /* Extract the details of INSN that we need. */
1305
1306 static void
1307 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1308 {
1309 gdb_byte *start = insn;
1310 int need_modrm;
1311
1312 details->raw_insn = insn;
1313
1314 details->opcode_len = -1;
1315 details->enc_prefix_offset = -1;
1316 details->opcode_offset = -1;
1317 details->modrm_offset = -1;
1318
1319 /* Skip legacy instruction prefixes. */
1320 insn = amd64_skip_prefixes (insn);
1321
1322 /* Skip REX/VEX instruction encoding prefixes. */
1323 if (rex_prefix_p (*insn))
1324 {
1325 details->enc_prefix_offset = insn - start;
1326 ++insn;
1327 }
1328 else if (vex2_prefix_p (*insn))
1329 {
1330 /* Don't record the offset in this case because this prefix has
1331 no REX.B equivalent. */
1332 insn += 2;
1333 }
1334 else if (vex3_prefix_p (*insn))
1335 {
1336 details->enc_prefix_offset = insn - start;
1337 insn += 3;
1338 }
1339
1340 details->opcode_offset = insn - start;
1341
1342 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1343 {
1344 /* Two or three-byte opcode. */
1345 ++insn;
1346 need_modrm = twobyte_has_modrm[*insn];
1347
1348 /* Check for three-byte opcode. */
1349 switch (*insn)
1350 {
1351 case 0x24:
1352 case 0x25:
1353 case 0x38:
1354 case 0x3a:
1355 case 0x7a:
1356 case 0x7b:
1357 ++insn;
1358 details->opcode_len = 3;
1359 break;
1360 default:
1361 details->opcode_len = 2;
1362 break;
1363 }
1364 }
1365 else
1366 {
1367 /* One-byte opcode. */
1368 need_modrm = onebyte_has_modrm[*insn];
1369 details->opcode_len = 1;
1370 }
1371
1372 if (need_modrm)
1373 {
1374 ++insn;
1375 details->modrm_offset = insn - start;
1376 }
1377 }
1378
1379 /* Update %rip-relative addressing in INSN.
1380
1381 %rip-relative addressing only uses a 32-bit displacement.
1382 32 bits is not enough to be guaranteed to cover the distance between where
1383 the real instruction is and where its copy is.
1384 Convert the insn to use base+disp addressing.
1385 We set base = pc + insn_length so we can leave disp unchanged. */
1386
1387 static void
1388 fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
1389 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1390 {
1391 const struct amd64_insn *insn_details = &dsc->insn_details;
1392 int modrm_offset = insn_details->modrm_offset;
1393 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1394 CORE_ADDR rip_base;
1395 int insn_length;
1396 int arch_tmp_regno, tmp_regno;
1397 ULONGEST orig_value;
1398
1399 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1400 ++insn;
1401
1402 /* Compute the rip-relative address. */
1403 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
1404 dsc->insn_buf.size (), from);
1405 rip_base = from + insn_length;
1406
1407 /* We need a register to hold the address.
1408 Pick one not used in the insn.
1409 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1410 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1411 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1412
1413 /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1). */
1414 static constexpr gdb_byte VEX3_NOT_B = 0x20;
1415
1416 /* REX.B should be unset (VEX.!B set) as we were using rip-relative
1417 addressing, but ensure it's unset (set for VEX) anyway, as tmp_regno
1418 is not r8-r15. */
1419 if (insn_details->enc_prefix_offset != -1)
1420 {
1421 gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
1422 if (rex_prefix_p (pfx[0]))
1423 pfx[0] &= ~REX_B;
1424 else if (vex3_prefix_p (pfx[0]))
1425 pfx[1] |= VEX3_NOT_B;
1426 else
1427 gdb_assert_not_reached ("unhandled prefix");
1428 }
1429
1430 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1431 dsc->tmp_regno = tmp_regno;
1432 dsc->tmp_save = orig_value;
1433 dsc->tmp_used = 1;
1434
1435 /* Convert the ModRM field to be base+disp. */
1436 dsc->insn_buf[modrm_offset] &= ~0xc7;
1437 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1438
1439 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1440
1441 displaced_debug_printf ("%%rip-relative addressing used.");
1442 displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
1443 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1444 paddress (gdbarch, rip_base));
1445 }
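
/* For illustration (the byte values shown are just one possible encoding):
   the 7-byte instruction 48 8b 05 d5 00 20 00 (mov 0x2000d5(%rip),%rax)
   uses ModRM 0x05.  amd64_get_unused_input_int_reg picks %rcx here, so
   the ModRM byte is rewritten to 0x81 (mod=10, rm=%rcx), giving
   48 8b 81 d5 00 20 00 (mov 0x2000d5(%rcx),%rax); %rcx is temporarily
   loaded with FROM plus the 7-byte instruction length, and its old value
   is restored later by amd64_displaced_step_fixup.  */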
1446
1447 static void
1448 fixup_displaced_copy (struct gdbarch *gdbarch,
1449 amd64_displaced_step_closure *dsc,
1450 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1451 {
1452 const struct amd64_insn *details = &dsc->insn_details;
1453
1454 if (details->modrm_offset != -1)
1455 {
1456 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1457
1458 if ((modrm & 0xc7) == 0x05)
1459 {
1460 /* The insn uses rip-relative addressing.
1461 Deal with it. */
1462 fixup_riprel (gdbarch, dsc, from, to, regs);
1463 }
1464 }
1465 }
1466
1467 displaced_step_closure_up
1468 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1469 CORE_ADDR from, CORE_ADDR to,
1470 struct regcache *regs)
1471 {
1472 int len = gdbarch_max_insn_length (gdbarch);
1473 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1474 continually watch for running off the end of the buffer. */
1475 int fixup_sentinel_space = len;
1476 std::unique_ptr<amd64_displaced_step_closure> dsc
1477 (new amd64_displaced_step_closure (len + fixup_sentinel_space));
1478 gdb_byte *buf = &dsc->insn_buf[0];
1479 struct amd64_insn *details = &dsc->insn_details;
1480
1481 read_memory (from, buf, len);
1482
1483 /* Set up the sentinel space so we don't have to worry about running
1484 off the end of the buffer. An excessive number of leading prefixes
1485 could otherwise cause this. */
1486 memset (buf + len, 0, fixup_sentinel_space);
1487
1488 amd64_get_insn_details (buf, details);
1489
1490 /* GDB may get control back after the insn after the syscall.
1491 Presumably this is a kernel bug.
1492 If this is a syscall, make sure there's a nop afterwards. */
1493 {
1494 int syscall_length;
1495
1496 if (amd64_syscall_p (details, &syscall_length))
1497 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1498 }
1499
1500 /* Modify the insn to cope with the address where it will be executed from.
1501 In particular, handle any rip-relative addressing. */
1502 fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);
1503
1504 write_memory (to, buf, len);
1505
1506 displaced_debug_printf ("copy %s->%s: %s",
1507 paddress (gdbarch, from), paddress (gdbarch, to),
1508 displaced_step_dump_bytes (buf, len).c_str ());
1509
1510 /* This is a workaround for a problem with g++ 4.8. */
1511 return displaced_step_closure_up (dsc.release ());
1512 }
1513
1514 static int
1515 amd64_absolute_jmp_p (const struct amd64_insn *details)
1516 {
1517 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1518
1519 if (insn[0] == 0xff)
1520 {
1521 /* jump near, absolute indirect (/4) */
1522 if ((insn[1] & 0x38) == 0x20)
1523 return 1;
1524
1525 /* jump far, absolute indirect (/5) */
1526 if ((insn[1] & 0x38) == 0x28)
1527 return 1;
1528 }
1529
1530 return 0;
1531 }
1532
1533 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1534
1535 static int
1536 amd64_jmp_p (const struct amd64_insn *details)
1537 {
1538 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1539
1540 /* jump short, relative. */
1541 if (insn[0] == 0xeb)
1542 return 1;
1543
1544 /* jump near, relative. */
1545 if (insn[0] == 0xe9)
1546 return 1;
1547
1548 return amd64_absolute_jmp_p (details);
1549 }
1550
1551 static int
1552 amd64_absolute_call_p (const struct amd64_insn *details)
1553 {
1554 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1555
1556 if (insn[0] == 0xff)
1557 {
1558 /* Call near, absolute indirect (/2) */
1559 if ((insn[1] & 0x38) == 0x10)
1560 return 1;
1561
1562 /* Call far, absolute indirect (/3) */
1563 if ((insn[1] & 0x38) == 0x18)
1564 return 1;
1565 }
1566
1567 return 0;
1568 }
1569
1570 static int
1571 amd64_ret_p (const struct amd64_insn *details)
1572 {
1573 /* NOTE: gcc can emit "repz ; ret". */
1574 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1575
1576 switch (insn[0])
1577 {
1578 case 0xc2: /* ret near, pop N bytes */
1579 case 0xc3: /* ret near */
1580 case 0xca: /* ret far, pop N bytes */
1581 case 0xcb: /* ret far */
1582 case 0xcf: /* iret */
1583 return 1;
1584
1585 default:
1586 return 0;
1587 }
1588 }
1589
1590 static int
1591 amd64_call_p (const struct amd64_insn *details)
1592 {
1593 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1594
1595 if (amd64_absolute_call_p (details))
1596 return 1;
1597
1598 /* call near, relative */
1599 if (insn[0] == 0xe8)
1600 return 1;
1601
1602 return 0;
1603 }
1604
1605 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1606 length in bytes. Otherwise, return zero. */
1607
1608 static int
1609 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1610 {
1611 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1612
1613 if (insn[0] == 0x0f && insn[1] == 0x05)
1614 {
1615 *lengthp = 2;
1616 return 1;
1617 }
1618
1619 return 0;
1620 }
1621
1622 /* Classify the instruction at ADDR using PRED.
1623 Throw an error if the memory can't be read. */
1624
1625 static int
1626 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1627 int (*pred) (const struct amd64_insn *))
1628 {
1629 struct amd64_insn details;
1630 gdb_byte *buf;
1631 int len, classification;
1632
1633 len = gdbarch_max_insn_length (gdbarch);
1634 buf = (gdb_byte *) alloca (len);
1635
1636 read_code (addr, buf, len);
1637 amd64_get_insn_details (buf, &details);
1638
1639 classification = pred (&details);
1640
1641 return classification;
1642 }
1643
1644 /* The gdbarch insn_is_call method. */
1645
1646 static int
1647 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1648 {
1649 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1650 }
1651
1652 /* The gdbarch insn_is_ret method. */
1653
1654 static int
1655 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1656 {
1657 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1658 }
1659
1660 /* The gdbarch insn_is_jump method. */
1661
1662 static int
1663 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1664 {
1665 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1666 }
1667
1668 /* Fix up the state of registers and memory after having single-stepped
1669 a displaced instruction. */
1670
1671 void
1672 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1673 struct displaced_step_closure *dsc_,
1674 CORE_ADDR from, CORE_ADDR to,
1675 struct regcache *regs)
1676 {
1677 amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
1678 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1679 /* The offset we applied to the instruction's address. */
1680 ULONGEST insn_offset = to - from;
1681 gdb_byte *insn = dsc->insn_buf.data ();
1682 const struct amd64_insn *insn_details = &dsc->insn_details;
1683
1684 displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
1685 paddress (gdbarch, from), paddress (gdbarch, to),
1686 insn[0], insn[1]);
1687
1688 /* If we used a tmp reg, restore it. */
1689
1690 if (dsc->tmp_used)
1691 {
1692 displaced_debug_printf ("restoring reg %d to %s",
1693 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1694 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1695 }
1696
1697 /* The list of issues to contend with here is taken from
1698 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1699 Yay for Free Software! */
1700
1701 /* Relocate the %rip back to the program's instruction stream,
1702 if necessary. */
1703
1704 /* Except in the case of absolute or indirect jump or call
1705 instructions, or a return instruction, the new rip is relative to
1706 the displaced instruction; make it relative to the original insn.
1707 Well, signal handler returns don't need relocation either, but we use the
1708 value of %rip to recognize those; see below. */
1709 if (! amd64_absolute_jmp_p (insn_details)
1710 && ! amd64_absolute_call_p (insn_details)
1711 && ! amd64_ret_p (insn_details))
1712 {
1713 ULONGEST orig_rip;
1714 int insn_len;
1715
1716 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1717
1718 /* A signal trampoline system call changes the %rip, resuming
1719 execution of the main program after the signal handler has
1720 returned. That makes them like 'return' instructions; we
1721 shouldn't relocate %rip.
1722
1723 But most system calls don't, and we do need to relocate %rip.
1724
1725 Our heuristic for distinguishing these cases: if stepping
1726 over the system call instruction left control directly after
1727 the instruction, then we relocate --- control almost certainly
1728 doesn't belong in the displaced copy. Otherwise, we assume
1729 the instruction has put control where it belongs, and leave
1730 it unrelocated. Goodness help us if there are PC-relative
1731 system calls. */
1732 if (amd64_syscall_p (insn_details, &insn_len)
1733 && orig_rip != to + insn_len
1734 /* GDB can get control back after the insn after the syscall.
1735 Presumably this is a kernel bug.
1736 Fixup ensures it's a nop; we add one to the length for it. */
1737 && orig_rip != to + insn_len + 1)
1738 displaced_debug_printf ("syscall changed %%rip; not relocating");
1739 else
1740 {
1741 ULONGEST rip = orig_rip - insn_offset;
1742
1743 /* If we just stepped over a breakpoint insn, we don't backup
1744 the pc on purpose; this is to match behaviour without
1745 stepping. */
1746
1747 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1748
1749 displaced_debug_printf ("relocated %%rip from %s to %s",
1750 paddress (gdbarch, orig_rip),
1751 paddress (gdbarch, rip));
1752 }
1753 }
1754
1755 /* If the instruction was PUSHFL, then the TF bit will be set in the
1756 pushed value, and should be cleared. We'll leave this for later,
1757 since GDB already messes up the TF flag when stepping over a
1758 pushfl. */
1759
1760 /* If the instruction was a call, the return address now atop the
1761 stack is the address following the copied instruction. We need
1762 to make it the address following the original instruction. */
1763 if (amd64_call_p (insn_details))
1764 {
1765 ULONGEST rsp;
1766 ULONGEST retaddr;
1767 const ULONGEST retaddr_len = 8;
1768
1769 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1770 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1771 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1772 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1773
1774 displaced_debug_printf ("relocated return addr at %s to %s",
1775 paddress (gdbarch, rsp),
1776 paddress (gdbarch, retaddr));
1777 }
1778 }
1779
1780 /* If the instruction INSN uses RIP-relative addressing, return the
1781 offset into the raw INSN where the displacement to be adjusted is
1782 found. Returns 0 if the instruction doesn't use RIP-relative
1783 addressing. */
1784
1785 static int
1786 rip_relative_offset (struct amd64_insn *insn)
1787 {
1788 if (insn->modrm_offset != -1)
1789 {
1790 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1791
1792 if ((modrm & 0xc7) == 0x05)
1793 {
1794 /* The displacement is found right after the ModRM byte. */
1795 return insn->modrm_offset + 1;
1796 }
1797 }
1798
1799 return 0;
1800 }
1801
1802 static void
1803 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1804 {
1805 target_write_memory (*to, buf, len);
1806 *to += len;
1807 }
1808
1809 static void
1810 amd64_relocate_instruction (struct gdbarch *gdbarch,
1811 CORE_ADDR *to, CORE_ADDR oldloc)
1812 {
1813 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1814 int len = gdbarch_max_insn_length (gdbarch);
1815 /* Extra space for sentinels. */
1816 int fixup_sentinel_space = len;
1817 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1818 struct amd64_insn insn_details;
1819 int offset = 0;
1820 LONGEST rel32, newrel;
1821 gdb_byte *insn;
1822 int insn_length;
1823
1824 read_memory (oldloc, buf, len);
1825
1826 /* Set up the sentinel space so we don't have to worry about running
1827 off the end of the buffer. An excessive number of leading prefixes
1828 could otherwise cause this. */
1829 memset (buf + len, 0, fixup_sentinel_space);
1830
1831 insn = buf;
1832 amd64_get_insn_details (insn, &insn_details);
1833
1834 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1835
1836 /* Skip legacy instruction prefixes. */
1837 insn = amd64_skip_prefixes (insn);
1838
1839 /* Adjust calls with 32-bit relative addresses as push/jump, with
1840 the address pushed being the location where the original call in
1841 the user program would return to. */
1842 if (insn[0] == 0xe8)
1843 {
1844 gdb_byte push_buf[32];
1845 CORE_ADDR ret_addr;
1846 int i = 0;
1847
1848 /* Where "ret" in the original code will return to. */
1849 ret_addr = oldloc + insn_length;
1850
1851 /* If pushing an address higher than or equal to 0x80000000,
1852 avoid 'pushq', as that sign extends its 32-bit operand, which
1853 would be incorrect. */
1854 if (ret_addr <= 0x7fffffff)
1855 {
1856 push_buf[0] = 0x68; /* pushq $... */
1857 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1858 i = 5;
1859 }
1860 else
1861 {
1862 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1863 push_buf[i++] = 0x83;
1864 push_buf[i++] = 0xec;
1865 push_buf[i++] = 0x08;
1866
1867 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1868 push_buf[i++] = 0x04;
1869 push_buf[i++] = 0x24;
1870 store_unsigned_integer (&push_buf[i], 4, byte_order,
1871 ret_addr & 0xffffffff);
1872 i += 4;
1873
1874 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1875 push_buf[i++] = 0x44;
1876 push_buf[i++] = 0x24;
1877 push_buf[i++] = 0x04;
1878 store_unsigned_integer (&push_buf[i], 4, byte_order,
1879 ret_addr >> 32);
1880 i += 4;
1881 }
1882 gdb_assert (i <= sizeof (push_buf));
1883 /* Push the push. */
1884 append_insns (to, i, push_buf);
1885
1886 /* Convert the relative call to a relative jump. */
1887 insn[0] = 0xe9;
1888
1889 /* Adjust the destination offset. */
1890 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1891 newrel = (oldloc - *to) + rel32;
1892 store_signed_integer (insn + 1, 4, byte_order, newrel);
1893
1894 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1895 hex_string (rel32), paddress (gdbarch, oldloc),
1896 hex_string (newrel), paddress (gdbarch, *to));
1897
1898 /* Write the adjusted jump into its displaced location. */
1899 append_insns (to, 5, insn);
1900 return;
1901 }
1902
1903 offset = rip_relative_offset (&insn_details);
1904 if (!offset)
1905 {
1906 /* Adjust jumps with 32-bit relative addresses. Calls are
1907 already handled above. */
1908 if (insn[0] == 0xe9)
1909 offset = 1;
1910 /* Adjust conditional jumps. */
1911 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1912 offset = 2;
1913 }
1914
1915 if (offset)
1916 {
1917 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1918 newrel = (oldloc - *to) + rel32;
1919 store_signed_integer (insn + offset, 4, byte_order, newrel);
1920 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1921 hex_string (rel32), paddress (gdbarch, oldloc),
1922 hex_string (newrel), paddress (gdbarch, *to));
1923 }
1924
1925 /* Write the adjusted instruction into its displaced location. */
1926 append_insns (to, insn_length, buf);
1927 }
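
/* Illustrative, standalone sketch (not part of GDB) of the displacement
   adjustment above: a rel32 branch copied from OLDLOC to *TO must keep
   its absolute target, so newrel = (oldloc - to) + rel32.  The example
   addresses are made up; the block is kept under "#if 0" so it is not
   compiled as part of this file.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  int64_t oldloc = 0x401000, to = 0x700000, insn_len = 5;
  int64_t rel32 = 0x200;                        /* Original displacement.  */
  int64_t target = oldloc + insn_len + rel32;   /* 0x401205.  */
  int64_t newrel = (oldloc - to) + rel32;

  /* The relocated branch at TO still reaches the original target.  */
  assert (to + insn_len + newrel == target);
  return 0;
}
#endif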
1928
1929 \f
1930 /* The maximum number of saved registers. This should include %rip. */
1931 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1932
1933 struct amd64_frame_cache
1934 {
1935 /* Base address. */
1936 CORE_ADDR base;
1937 int base_p;
1938 CORE_ADDR sp_offset;
1939 CORE_ADDR pc;
1940
1941 /* Saved registers. */
1942 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1943 CORE_ADDR saved_sp;
1944 int saved_sp_reg;
1945
1946 /* Do we have a frame? */
1947 int frameless_p;
1948 };
1949
1950 /* Initialize a frame cache. */
1951
1952 static void
1953 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1954 {
1955 int i;
1956
1957 /* Base address. */
1958 cache->base = 0;
1959 cache->base_p = 0;
1960 cache->sp_offset = -8;
1961 cache->pc = 0;
1962
1963 /* Saved registers. We initialize these to -1 since zero is a valid
1964 offset (that's where %rbp is supposed to be stored).
1965 The values start out as being offsets, and are later converted to
1966 addresses (at which point -1 is interpreted as an address, still meaning
1967 "invalid"). */
1968 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1969 cache->saved_regs[i] = -1;
1970 cache->saved_sp = 0;
1971 cache->saved_sp_reg = -1;
1972
1973 /* Frameless until proven otherwise. */
1974 cache->frameless_p = 1;
1975 }
1976
1977 /* Allocate and initialize a frame cache. */
1978
1979 static struct amd64_frame_cache *
1980 amd64_alloc_frame_cache (void)
1981 {
1982 struct amd64_frame_cache *cache;
1983
1984 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1985 amd64_init_frame_cache (cache);
1986 return cache;
1987 }
1988
1989 /* GCC 4.4 and later can put code in the prologue to realign the
1990 stack pointer. Check whether PC points to such code, and update
1991 CACHE accordingly. Return the first instruction after the code
1992 sequence or CURRENT_PC, whichever is smaller. If we don't
1993 recognize the code, return PC. */
1994
1995 static CORE_ADDR
1996 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1997 struct amd64_frame_cache *cache)
1998 {
1999 /* There are 2 code sequences to re-align stack before the frame
2000 gets set up:
2001
2002 1. Use a caller-saved register:
2003
2004 leaq 8(%rsp), %reg
2005 andq $-XXX, %rsp
2006 pushq -8(%reg)
2007
2008 2. Use a callee-saved register:
2009
2010 pushq %reg
2011 leaq 16(%rsp), %reg
2012 andq $-XXX, %rsp
2013 pushq -8(%reg)
2014
2015 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2016
2017 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2018 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2019 */
2020
2021 gdb_byte buf[18];
2022 int reg, r;
2023 int offset, offset_and;
2024
2025 if (target_read_code (pc, buf, sizeof buf))
2026 return pc;
2027
2028 /* Check caller-saved register. The first instruction has
2029 to be "leaq 8(%rsp), %reg". */
2030 if ((buf[0] & 0xfb) == 0x48
2031 && buf[1] == 0x8d
2032 && buf[3] == 0x24
2033 && buf[4] == 0x8)
2034 {
2035 /* MOD must be binary 01 and R/M must be binary 100. */
2036 if ((buf[2] & 0xc7) != 0x44)
2037 return pc;
2038
2039 /* REG has register number. */
2040 reg = (buf[2] >> 3) & 7;
2041
2042 /* Check the REX.R bit. */
2043 if (buf[0] == 0x4c)
2044 reg += 8;
2045
2046 offset = 5;
2047 }
2048 else
2049 {
2050 /* Check callee-saved register. The first instruction
2051 has to be "pushq %reg". */
2052 reg = 0;
2053 if ((buf[0] & 0xf8) == 0x50)
2054 offset = 0;
2055 else if ((buf[0] & 0xf6) == 0x40
2056 && (buf[1] & 0xf8) == 0x50)
2057 {
2058 /* Check the REX.B bit. */
2059 if ((buf[0] & 1) != 0)
2060 reg = 8;
2061
2062 offset = 1;
2063 }
2064 else
2065 return pc;
2066
2067 /* Get register. */
2068 reg += buf[offset] & 0x7;
2069
2070 offset++;
2071
2072 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2073 if ((buf[offset] & 0xfb) != 0x48
2074 || buf[offset + 1] != 0x8d
2075 || buf[offset + 3] != 0x24
2076 || buf[offset + 4] != 0x10)
2077 return pc;
2078
2079 /* MOD must be binary 01 and R/M must be binary 100. */
2080 if ((buf[offset + 2] & 0xc7) != 0x44)
2081 return pc;
2082
2083 /* REG has register number. */
2084 r = (buf[offset + 2] >> 3) & 7;
2085
2086 /* Check the REX.R bit. */
2087 if (buf[offset] == 0x4c)
2088 r += 8;
2089
2090 /* Registers in pushq and leaq have to be the same. */
2091 if (reg != r)
2092 return pc;
2093
2094 offset += 5;
2095 }
2096
2097 /* Register can't be %rsp or %rbp. */
2098 if (reg == 4 || reg == 5)
2099 return pc;
2100
2101 /* The next instruction has to be "andq $-XXX, %rsp". */
2102 if (buf[offset] != 0x48
2103 || buf[offset + 2] != 0xe4
2104 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2105 return pc;
2106
2107 offset_and = offset;
2108 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2109
2110 /* The next instruction has to be "pushq -8(%reg)". */
2111 r = 0;
2112 if (buf[offset] == 0xff)
2113 offset++;
2114 else if ((buf[offset] & 0xf6) == 0x40
2115 && buf[offset + 1] == 0xff)
2116 {
2117 /* Check the REX.B bit. */
2118 if ((buf[offset] & 0x1) != 0)
2119 r = 8;
2120 offset += 2;
2121 }
2122 else
2123 return pc;
2124
2125 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2126 01. */
2127 if (buf[offset + 1] != 0xf8
2128 || (buf[offset] & 0xf8) != 0x70)
2129 return pc;
2130
2131 /* R/M has register. */
2132 r += buf[offset] & 7;
2133
2134 /* Registers in leaq and pushq have to be the same. */
2135 if (reg != r)
2136 return pc;
2137
2138 if (current_pc > pc + offset_and)
2139 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2140
2141 return std::min (pc + offset + 2, current_pc);
2142 }
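
/* Illustrative, standalone sketch (not part of GDB) of the decoding done
   above for the first realignment sequence, using the concrete bytes of
   "leaq 8(%rsp), %r10" (4c 8d 54 24 08): the ModRM reg field plus the
   REX.R bit yields architectural register number 10.  The block is kept
   under "#if 0" so it is not compiled as part of this file.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  const uint8_t buf[] = { 0x4c, 0x8d, 0x54, 0x24, 0x08 };
  int reg;

  assert ((buf[0] & 0xfb) == 0x48);   /* REX.W, REX.R possibly set.  */
  assert (buf[1] == 0x8d);            /* lea.  */
  assert ((buf[2] & 0xc7) == 0x44);   /* mod == 01, r/m == 100 (SIB).  */
  assert (buf[3] == 0x24);            /* SIB: base %rsp, no index.  */
  assert (buf[4] == 0x08);            /* disp8 == 8.  */

  reg = (buf[2] >> 3) & 7;            /* ModRM reg field: 2.  */
  if (buf[0] == 0x4c)                 /* REX.R set: high register bank.  */
    reg += 8;
  assert (reg == 10);                 /* %r10, not %rsp (4) or %rbp (5).  */
  return 0;
}
#endif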
2143
2144 /* Similar to amd64_analyze_stack_align for x32. */
2145
2146 static CORE_ADDR
2147 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2148 struct amd64_frame_cache *cache)
2149 {
2150 /* There are 2 code sequences to re-align stack before the frame
2151 gets set up:
2152
2153 1. Use a caller-saved register:
2154
2155 leaq 8(%rsp), %reg
2156 andq $-XXX, %rsp
2157 pushq -8(%reg)
2158
2159 or
2160
2161 [addr32] leal 8(%rsp), %reg
2162 andl $-XXX, %esp
2163 [addr32] pushq -8(%reg)
2164
2165 2. Use a callee-saved register:
2166
2167 pushq %reg
2168 leaq 16(%rsp), %reg
2169 andq $-XXX, %rsp
2170 pushq -8(%reg)
2171
2172 or
2173
2174 pushq %reg
2175 [addr32] leal 16(%rsp), %reg
2176 andl $-XXX, %esp
2177 [addr32] pushq -8(%reg)
2178
2179 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2180
2181 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2182 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2183
2184 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2185
2186 0x83 0xe4 0xf0 andl $-16, %esp
2187 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2188 */
2189
2190 gdb_byte buf[19];
2191 int reg, r;
2192 int offset, offset_and;
2193
2194 if (target_read_memory (pc, buf, sizeof buf))
2195 return pc;
2196
2197 /* Skip optional addr32 prefix. */
2198 offset = buf[0] == 0x67 ? 1 : 0;
2199
2200 /* Check caller-saved register. The first instruction has
2201 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2202 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2203 && buf[offset + 1] == 0x8d
2204 && buf[offset + 3] == 0x24
2205 && buf[offset + 4] == 0x8)
2206 {
2207 /* MOD must be binary 01 and R/M must be binary 100. */
2208 if ((buf[offset + 2] & 0xc7) != 0x44)
2209 return pc;
2210
2211 /* REG has register number. */
2212 reg = (buf[offset + 2] >> 3) & 7;
2213
2214 /* Check the REX.R bit. */
2215 if ((buf[offset] & 0x4) != 0)
2216 reg += 8;
2217
2218 offset += 5;
2219 }
2220 else
2221 {
2222 /* Check callee-saved register. The first instruction
2223 has to be "pushq %reg". */
2224 reg = 0;
2225 if ((buf[offset] & 0xf6) == 0x40
2226 && (buf[offset + 1] & 0xf8) == 0x50)
2227 {
2228 /* Check the REX.B bit. */
2229 if ((buf[offset] & 1) != 0)
2230 reg = 8;
2231
2232 offset += 1;
2233 }
2234 else if ((buf[offset] & 0xf8) != 0x50)
2235 return pc;
2236
2237 /* Get register. */
2238 reg += buf[offset] & 0x7;
2239
2240 offset++;
2241
2242 /* Skip optional addr32 prefix. */
2243 if (buf[offset] == 0x67)
2244 offset++;
2245
2246 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2247 "leal 16(%rsp), %reg". */
2248 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2249 || buf[offset + 1] != 0x8d
2250 || buf[offset + 3] != 0x24
2251 || buf[offset + 4] != 0x10)
2252 return pc;
2253
2254 /* MOD must be binary 01 and R/M must be binary 100. */
2255 if ((buf[offset + 2] & 0xc7) != 0x44)
2256 return pc;
2257
2258 /* REG has register number. */
2259 r = (buf[offset + 2] >> 3) & 7;
2260
2261 /* Check the REX.R bit. */
2262 if ((buf[offset] & 0x4) != 0)
2263 r += 8;
2264
2265 /* Registers in pushq and leaq have to be the same. */
2266 if (reg != r)
2267 return pc;
2268
2269 offset += 5;
2270 }
2271
2272 /* Register can't be %rsp or %rbp. */
2273 if (reg == 4 || reg == 5)
2274 return pc;
2275
2276 /* The next instruction may be "andq $-XXX, %rsp" or
2277 "andl $-XXX, %esp". */
2278 if (buf[offset] != 0x48)
2279 offset--;
2280
2281 if (buf[offset + 2] != 0xe4
2282 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2283 return pc;
2284
2285 offset_and = offset;
2286 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2287
2288 /* Skip optional addr32 prefix. */
2289 if (buf[offset] == 0x67)
2290 offset++;
2291
2292 /* The next instruction has to be "pushq -8(%reg)". */
2293 r = 0;
2294 if (buf[offset] == 0xff)
2295 offset++;
2296 else if ((buf[offset] & 0xf6) == 0x40
2297 && buf[offset + 1] == 0xff)
2298 {
2299 /* Check the REX.B bit. */
2300 if ((buf[offset] & 0x1) != 0)
2301 r = 8;
2302 offset += 2;
2303 }
2304 else
2305 return pc;
2306
2307 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2308 01. */
2309 if (buf[offset + 1] != 0xf8
2310 || (buf[offset] & 0xf8) != 0x70)
2311 return pc;
2312
2313 /* R/M has register. */
2314 r += buf[offset] & 7;
2315
2316 /* Registers in leaq and pushq have to be the same. */
2317 if (reg != r)
2318 return pc;
2319
2320 if (current_pc > pc + offset_and)
2321 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2322
2323 return std::min (pc + offset + 2, current_pc);
2324 }
2325
2326 /* Do a limited analysis of the prologue at PC and update CACHE
2327 accordingly. Bail out early if CURRENT_PC is reached. Return the
2328 address where the analysis stopped.
2329
2330 We will handle only functions beginning with:
2331
2332 pushq %rbp 0x55
2333 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2334
2335 or (for the X32 ABI):
2336
2337 pushq %rbp 0x55
2338 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2339
2340 An `endbr64` instruction may precede these sequences; if present, it
2341 will be skipped.
2342
2343 Any function that doesn't start with one of these sequences will be
2344 assumed to have no prologue and thus no valid frame pointer in
2345 %rbp. */
2346
2347 static CORE_ADDR
2348 amd64_analyze_prologue (struct gdbarch *gdbarch,
2349 CORE_ADDR pc, CORE_ADDR current_pc,
2350 struct amd64_frame_cache *cache)
2351 {
2352 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2353 /* The `endbr64` instruction. */
2354 static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2355 /* There are two variations of movq %rsp, %rbp. */
2356 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2357 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2358 /* Ditto for movl %esp, %ebp. */
2359 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2360 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2361
2362 gdb_byte buf[3];
2363 gdb_byte op;
2364
2365 if (current_pc <= pc)
2366 return current_pc;
2367
2368 if (gdbarch_ptr_bit (gdbarch) == 32)
2369 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2370 else
2371 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2372
2373 op = read_code_unsigned_integer (pc, 1, byte_order);
2374
2375 /* Check for the `endbr64` instruction, skip it if found. */
2376 if (op == endbr64[0])
2377 {
2378 read_code (pc + 1, buf, 3);
2379
2380 if (memcmp (buf, &endbr64[1], 3) == 0)
2381 pc += 4;
2382
2383 op = read_code_unsigned_integer (pc, 1, byte_order);
2384 }
2385
2386 if (current_pc <= pc)
2387 return current_pc;
2388
2389 if (op == 0x55) /* pushq %rbp */
2390 {
2391 /* Take into account that we've executed the `pushq %rbp' that
2392 starts this instruction sequence. */
2393 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2394 cache->sp_offset += 8;
2395
2396 /* If that's all, return now. */
2397 if (current_pc <= pc + 1)
2398 return current_pc;
2399
2400 read_code (pc + 1, buf, 3);
2401
2402 /* Check for `movq %rsp, %rbp'. */
2403 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2404 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2405 {
2406 /* OK, we actually have a frame. */
2407 cache->frameless_p = 0;
2408 return pc + 4;
2409 }
2410
2411 /* For X32, also check for `movl %esp, %ebp'. */
2412 if (gdbarch_ptr_bit (gdbarch) == 32)
2413 {
2414 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2415 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2416 {
2417 /* OK, we actually have a frame. */
2418 cache->frameless_p = 0;
2419 return pc + 3;
2420 }
2421 }
2422
2423 return pc + 1;
2424 }
2425
2426 return pc;
2427 }
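
/* Illustrative, standalone sketch (not part of GDB) of the byte pattern
   the analysis above accepts for a conventional frame, here preceded by
   endbr64.  The block is kept under "#if 0" so it is not compiled as
   part of this file.  */
#if 0
#include <stdint.h>
#include <string.h>
#include <assert.h>

int
main (void)
{
  /* endbr64; pushq %rbp; movq %rsp, %rbp  */
  const uint8_t code[] = { 0xf3, 0x0f, 0x1e, 0xfa, 0x55, 0x48, 0x89, 0xe5 };
  static const uint8_t endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  static const uint8_t mov_rsp_rbp[3] = { 0x48, 0x89, 0xe5 };
  size_t pc = 0;

  if (memcmp (code + pc, endbr64, sizeof endbr64) == 0)
    pc += 4;                            /* Skip endbr64.  */
  assert (code[pc] == 0x55);            /* pushq %rbp.  */
  assert (memcmp (code + pc + 1, mov_rsp_rbp, sizeof mov_rsp_rbp) == 0);
  /* A prologue analyzer would report the frame as set up at pc + 4.  */
  return 0;
}
#endif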
2428
2429 /* Work around false termination of prologue - GCC PR debug/48827.
2430
2431 START_PC is the first instruction of a function; PC is the prologue end
2432 determined so far. The function returns PC if it has nothing to do.
2433
2434 84 c0 test %al,%al
2435 74 23 je after
2436 <-- here the line advance is 0 - the false prologue end marker.
2437 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2438 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2439 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2440 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2441 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2442 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2443 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2444 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2445 after: */
2446
2447 static CORE_ADDR
2448 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2449 {
2450 struct symtab_and_line start_pc_sal, next_sal;
2451 gdb_byte buf[4 + 8 * 7];
2452 int offset, xmmreg;
2453
2454 if (pc == start_pc)
2455 return pc;
2456
2457 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2458 if (start_pc_sal.symtab == NULL
2459 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2460 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2461 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2462 return pc;
2463
2464 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2465 if (next_sal.line != start_pc_sal.line)
2466 return pc;
2467
2468 /* START_PC can be from overlaid memory, ignored here. */
2469 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2470 return pc;
2471
2472 /* test %al,%al */
2473 if (buf[0] != 0x84 || buf[1] != 0xc0)
2474 return pc;
2475 /* je AFTER */
2476 if (buf[2] != 0x74)
2477 return pc;
2478
2479 offset = 4;
2480 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2481 {
2482 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2483 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2484 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2485 return pc;
2486
2487 /* 0b01?????? */
2488 if ((buf[offset + 2] & 0xc0) == 0x40)
2489 {
2490 /* 8-bit displacement. */
2491 offset += 4;
2492 }
2493 /* 0b10?????? */
2494 else if ((buf[offset + 2] & 0xc0) == 0x80)
2495 {
2496 /* 32-bit displacement. */
2497 offset += 7;
2498 }
2499 else
2500 return pc;
2501 }
2502
2503 /* je AFTER */
2504 if (offset - 4 != buf[3])
2505 return pc;
2506
2507 return next_sal.end;
2508 }
2509
2510 /* Return PC of first real instruction. */
2511
2512 static CORE_ADDR
2513 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2514 {
2515 struct amd64_frame_cache cache;
2516 CORE_ADDR pc;
2517 CORE_ADDR func_addr;
2518
2519 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2520 {
2521 CORE_ADDR post_prologue_pc
2522 = skip_prologue_using_sal (gdbarch, func_addr);
2523 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2524
2525 /* The LLVM backend (Clang/Flang) always emits a line note before the
2526 prologue and another one after. We trust Clang to emit usable
2527 line notes. */
2528 if (post_prologue_pc
2529 && (cust != NULL
2530 && COMPUNIT_PRODUCER (cust) != NULL
2531 && producer_is_llvm (COMPUNIT_PRODUCER (cust))))
2532 return std::max (start_pc, post_prologue_pc);
2533 }
2534
2535 amd64_init_frame_cache (&cache);
2536 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2537 &cache);
2538 if (cache.frameless_p)
2539 return start_pc;
2540
2541 return amd64_skip_xmm_prologue (pc, start_pc);
2542 }
2543 \f
2544
2545 /* Normal frames. */
2546
2547 static void
2548 amd64_frame_cache_1 (struct frame_info *this_frame,
2549 struct amd64_frame_cache *cache)
2550 {
2551 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2552 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2553 gdb_byte buf[8];
2554 int i;
2555
2556 cache->pc = get_frame_func (this_frame);
2557 if (cache->pc != 0)
2558 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2559 cache);
2560
2561 if (cache->frameless_p)
2562 {
2563 /* We didn't find a valid frame. If we're at the start of a
2564 function, or somewhere halfway through its prologue, the function's
2565 frame probably hasn't been fully set up yet. Try to
2566 reconstruct the base address for the stack frame by looking
2567 at the stack pointer. For truly "frameless" functions this
2568 might work too. */
2569
2570 if (cache->saved_sp_reg != -1)
2571 {
2572 /* Stack pointer has been saved. */
2573 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2574 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2575
2576 /* We're partway through aligning the stack. */
2577 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2578 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2579
2580 /* This will be added back below. */
2581 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2582 }
2583 else
2584 {
2585 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2586 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2587 + cache->sp_offset;
2588 }
2589 }
2590 else
2591 {
2592 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2593 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2594 }
2595
2596 /* Now that we have the base address for the stack frame we can
2597 calculate the value of %rsp in the calling frame. */
2598 cache->saved_sp = cache->base + 16;
2599
2600 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2601 frame we find it at the same offset from the reconstructed base
2602 address. If we're partway through aligning the stack, %rip is handled
2603 differently (see above). */
2604 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2605 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2606
2607 /* Adjust all the saved registers such that they contain addresses
2608 instead of offsets. */
2609 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2610 if (cache->saved_regs[i] != -1)
2611 cache->saved_regs[i] += cache->base;
2612
2613 cache->base_p = 1;
2614 }
2615
2616 static struct amd64_frame_cache *
2617 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2618 {
2619 struct amd64_frame_cache *cache;
2620
2621 if (*this_cache)
2622 return (struct amd64_frame_cache *) *this_cache;
2623
2624 cache = amd64_alloc_frame_cache ();
2625 *this_cache = cache;
2626
2627 try
2628 {
2629 amd64_frame_cache_1 (this_frame, cache);
2630 }
2631 catch (const gdb_exception_error &ex)
2632 {
2633 if (ex.error != NOT_AVAILABLE_ERROR)
2634 throw;
2635 }
2636
2637 return cache;
2638 }
2639
2640 static enum unwind_stop_reason
2641 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2642 void **this_cache)
2643 {
2644 struct amd64_frame_cache *cache =
2645 amd64_frame_cache (this_frame, this_cache);
2646
2647 if (!cache->base_p)
2648 return UNWIND_UNAVAILABLE;
2649
2650 /* This marks the outermost frame. */
2651 if (cache->base == 0)
2652 return UNWIND_OUTERMOST;
2653
2654 return UNWIND_NO_REASON;
2655 }
2656
2657 static void
2658 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2659 struct frame_id *this_id)
2660 {
2661 struct amd64_frame_cache *cache =
2662 amd64_frame_cache (this_frame, this_cache);
2663
2664 if (!cache->base_p)
2665 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2666 else if (cache->base == 0)
2667 {
2668 /* This marks the outermost frame. */
2669 return;
2670 }
2671 else
2672 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2673 }
2674
2675 static struct value *
2676 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2677 int regnum)
2678 {
2679 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2680 struct amd64_frame_cache *cache =
2681 amd64_frame_cache (this_frame, this_cache);
2682
2683 gdb_assert (regnum >= 0);
2684
2685 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2686 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2687
2688 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2689 return frame_unwind_got_memory (this_frame, regnum,
2690 cache->saved_regs[regnum]);
2691
2692 return frame_unwind_got_register (this_frame, regnum, regnum);
2693 }
2694
2695 static const struct frame_unwind amd64_frame_unwind =
2696 {
2697 NORMAL_FRAME,
2698 amd64_frame_unwind_stop_reason,
2699 amd64_frame_this_id,
2700 amd64_frame_prev_register,
2701 NULL,
2702 default_frame_sniffer
2703 };
2704 \f
2705 /* Generate a bytecode expression to get the value of the saved PC. */
2706
2707 static void
2708 amd64_gen_return_address (struct gdbarch *gdbarch,
2709 struct agent_expr *ax, struct axs_value *value,
2710 CORE_ADDR scope)
2711 {
2712 /* The following sequence assumes the traditional use of the base
2713 register. */
2714 ax_reg (ax, AMD64_RBP_REGNUM);
2715 ax_const_l (ax, 8);
2716 ax_simple (ax, aop_add);
2717 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2718 value->kind = axs_lvalue_memory;
2719 }
2720 \f
2721
2722 /* Signal trampolines. */
2723
2724 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2725 64-bit variants. This would require using identical frame caches
2726 on both platforms. */
2727
2728 static struct amd64_frame_cache *
2729 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2730 {
2731 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2732 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2733 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2734 struct amd64_frame_cache *cache;
2735 CORE_ADDR addr;
2736 gdb_byte buf[8];
2737 int i;
2738
2739 if (*this_cache)
2740 return (struct amd64_frame_cache *) *this_cache;
2741
2742 cache = amd64_alloc_frame_cache ();
2743
2744 try
2745 {
2746 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2747 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2748
2749 addr = tdep->sigcontext_addr (this_frame);
2750 gdb_assert (tdep->sc_reg_offset);
2751 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2752 for (i = 0; i < tdep->sc_num_regs; i++)
2753 if (tdep->sc_reg_offset[i] != -1)
2754 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2755
2756 cache->base_p = 1;
2757 }
2758 catch (const gdb_exception_error &ex)
2759 {
2760 if (ex.error != NOT_AVAILABLE_ERROR)
2761 throw;
2762 }
2763
2764 *this_cache = cache;
2765 return cache;
2766 }
2767
2768 static enum unwind_stop_reason
2769 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2770 void **this_cache)
2771 {
2772 struct amd64_frame_cache *cache =
2773 amd64_sigtramp_frame_cache (this_frame, this_cache);
2774
2775 if (!cache->base_p)
2776 return UNWIND_UNAVAILABLE;
2777
2778 return UNWIND_NO_REASON;
2779 }
2780
2781 static void
2782 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2783 void **this_cache, struct frame_id *this_id)
2784 {
2785 struct amd64_frame_cache *cache =
2786 amd64_sigtramp_frame_cache (this_frame, this_cache);
2787
2788 if (!cache->base_p)
2789 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2790 else if (cache->base == 0)
2791 {
2792 /* This marks the outermost frame. */
2793 return;
2794 }
2795 else
2796 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2797 }
2798
2799 static struct value *
2800 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2801 void **this_cache, int regnum)
2802 {
2803 /* Make sure we've initialized the cache. */
2804 amd64_sigtramp_frame_cache (this_frame, this_cache);
2805
2806 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2807 }
2808
2809 static int
2810 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2811 struct frame_info *this_frame,
2812 void **this_cache)
2813 {
2814 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2815
2816 /* We shouldn't even bother if we don't have a sigcontext_addr
2817 handler. */
2818 if (tdep->sigcontext_addr == NULL)
2819 return 0;
2820
2821 if (tdep->sigtramp_p != NULL)
2822 {
2823 if (tdep->sigtramp_p (this_frame))
2824 return 1;
2825 }
2826
2827 if (tdep->sigtramp_start != 0)
2828 {
2829 CORE_ADDR pc = get_frame_pc (this_frame);
2830
2831 gdb_assert (tdep->sigtramp_end != 0);
2832 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2833 return 1;
2834 }
2835
2836 return 0;
2837 }
2838
2839 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2840 {
2841 SIGTRAMP_FRAME,
2842 amd64_sigtramp_frame_unwind_stop_reason,
2843 amd64_sigtramp_frame_this_id,
2844 amd64_sigtramp_frame_prev_register,
2845 NULL,
2846 amd64_sigtramp_frame_sniffer
2847 };
2848 \f
2849
2850 static CORE_ADDR
2851 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2852 {
2853 struct amd64_frame_cache *cache =
2854 amd64_frame_cache (this_frame, this_cache);
2855
2856 return cache->base;
2857 }
2858
2859 static const struct frame_base amd64_frame_base =
2860 {
2861 &amd64_frame_unwind,
2862 amd64_frame_base_address,
2863 amd64_frame_base_address,
2864 amd64_frame_base_address
2865 };
2866
2867 /* Normal frames, but in a function epilogue. */
2868
2869 /* Implement the stack_frame_destroyed_p gdbarch method.
2870
2871 The epilogue is defined here as the 'ret' instruction, which will
2872 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2873 the function's stack frame. */
2874
2875 static int
2876 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2877 {
2878 gdb_byte insn;
2879 struct compunit_symtab *cust;
2880
2881 cust = find_pc_compunit_symtab (pc);
2882 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2883 return 0;
2884
2885 if (target_read_memory (pc, &insn, 1))
2886 return 0; /* Can't read memory at pc. */
2887
2888 if (insn != 0xc3) /* 'ret' instruction. */
2889 return 0;
2890
2891 return 1;
2892 }
2893
2894 static int
2895 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2896 struct frame_info *this_frame,
2897 void **this_prologue_cache)
2898 {
2899 if (frame_relative_level (this_frame) == 0)
2900 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2901 get_frame_pc (this_frame));
2902 else
2903 return 0;
2904 }
2905
2906 static struct amd64_frame_cache *
2907 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2908 {
2909 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2910 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2911 struct amd64_frame_cache *cache;
2912 gdb_byte buf[8];
2913
2914 if (*this_cache)
2915 return (struct amd64_frame_cache *) *this_cache;
2916
2917 cache = amd64_alloc_frame_cache ();
2918 *this_cache = cache;
2919
2920 try
2921 {
2922 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2923 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2924 cache->base = extract_unsigned_integer (buf, 8,
2925 byte_order) + cache->sp_offset;
2926
2927 /* Cache pc will be the frame's pc. */
2928 cache->pc = get_frame_pc (this_frame);
2929
2930 /* The saved %rsp will be at cache->base plus 16. */
2931 cache->saved_sp = cache->base + 16;
2932
2933 /* The saved %rip will be at cache->base plus 8. */
2934 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2935
2936 cache->base_p = 1;
2937 }
2938 catch (const gdb_exception_error &ex)
2939 {
2940 if (ex.error != NOT_AVAILABLE_ERROR)
2941 throw;
2942 }
2943
2944 return cache;
2945 }
2946
2947 static enum unwind_stop_reason
2948 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2949 void **this_cache)
2950 {
2951 struct amd64_frame_cache *cache
2952 = amd64_epilogue_frame_cache (this_frame, this_cache);
2953
2954 if (!cache->base_p)
2955 return UNWIND_UNAVAILABLE;
2956
2957 return UNWIND_NO_REASON;
2958 }
2959
2960 static void
2961 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2962 void **this_cache,
2963 struct frame_id *this_id)
2964 {
2965 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2966 this_cache);
2967
2968 if (!cache->base_p)
2969 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2970 else
2971 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2972 }
2973
2974 static const struct frame_unwind amd64_epilogue_frame_unwind =
2975 {
2976 NORMAL_FRAME,
2977 amd64_epilogue_frame_unwind_stop_reason,
2978 amd64_epilogue_frame_this_id,
2979 amd64_frame_prev_register,
2980 NULL,
2981 amd64_epilogue_frame_sniffer
2982 };
2983
2984 static struct frame_id
2985 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2986 {
2987 CORE_ADDR fp;
2988
2989 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2990
2991 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2992 }
2993
2994 /* 16-byte align the SP per the frame requirements. */
2995
2996 static CORE_ADDR
2997 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2998 {
2999 return sp & -(CORE_ADDR)16;
3000 }
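
/* Illustrative, standalone sketch (not part of GDB) of the alignment
   above: AND-ing with -16 clears the low four bits and rounds the stack
   pointer down to a 16-byte boundary.  The example value is made up; the
   block is kept under "#if 0" so it is not compiled as part of this
   file.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  uint64_t sp = 0x7fffffffe348ULL;
  uint64_t aligned = sp & -(uint64_t) 16;

  assert (aligned == 0x7fffffffe340ULL);
  assert ((aligned & 0xf) == 0);        /* 16-byte aligned.  */
  assert (aligned <= sp);               /* Rounded down, never up.  */
  return 0;
}
#endif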
3001 \f
3002
3003 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3004 in the floating-point register set REGSET to register cache
3005 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3006
3007 static void
3008 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3009 int regnum, const void *fpregs, size_t len)
3010 {
3011 struct gdbarch *gdbarch = regcache->arch ();
3012 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3013
3014 gdb_assert (len >= tdep->sizeof_fpregset);
3015 amd64_supply_fxsave (regcache, regnum, fpregs);
3016 }
3017
3018 /* Collect register REGNUM from the register cache REGCACHE and store
3019 it in the buffer specified by FPREGS and LEN as described by the
3020 floating-point register set REGSET. If REGNUM is -1, do this for
3021 all registers in REGSET. */
3022
3023 static void
3024 amd64_collect_fpregset (const struct regset *regset,
3025 const struct regcache *regcache,
3026 int regnum, void *fpregs, size_t len)
3027 {
3028 struct gdbarch *gdbarch = regcache->arch ();
3029 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3030
3031 gdb_assert (len >= tdep->sizeof_fpregset);
3032 amd64_collect_fxsave (regcache, regnum, fpregs);
3033 }
3034
3035 const struct regset amd64_fpregset =
3036 {
3037 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3038 };
3039 \f
3040
3041 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3042 %rdi. We expect its value to be a pointer to the jmp_buf structure
3043 from which we extract the address that we will land at. This
3044 address is copied into PC. This routine returns non-zero on
3045 success. */
3046
3047 static int
3048 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
3049 {
3050 gdb_byte buf[8];
3051 CORE_ADDR jb_addr;
3052 struct gdbarch *gdbarch = get_frame_arch (frame);
3053 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
3054 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
3055
3056 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3057 longjmp will land. */
3058 if (jb_pc_offset == -1)
3059 return 0;
3060
3061 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3062 jb_addr = extract_typed_address
3063 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3064 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3065 return 0;
3066
3067 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3068
3069 return 1;
3070 }
3071
3072 static const int amd64_record_regmap[] =
3073 {
3074 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3075 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3076 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3077 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3078 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3079 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3080 };
3081
3082 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3083
3084 static bool
3085 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3086 {
3087 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3088 AMD64_RAX_REGNUM,
3089 AMD64_RIP_REGNUM);
3090 }
3091
3092 void
3093 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3094 const target_desc *default_tdesc)
3095 {
3096 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3097 const struct target_desc *tdesc = info.target_desc;
3098 static const char *const stap_integer_prefixes[] = { "$", NULL };
3099 static const char *const stap_register_prefixes[] = { "%", NULL };
3100 static const char *const stap_register_indirection_prefixes[] = { "(",
3101 NULL };
3102 static const char *const stap_register_indirection_suffixes[] = { ")",
3103 NULL };
3104
3105 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3106 floating-point registers. */
3107 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3108 tdep->fpregset = &amd64_fpregset;
3109
3110 if (! tdesc_has_registers (tdesc))
3111 tdesc = default_tdesc;
3112 tdep->tdesc = tdesc;
3113
3114 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3115 tdep->register_names = amd64_register_names;
3116
3117 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3118 {
3119 tdep->zmmh_register_names = amd64_zmmh_names;
3120 tdep->k_register_names = amd64_k_names;
3121 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3122 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3123
3124 tdep->num_zmm_regs = 32;
3125 tdep->num_xmm_avx512_regs = 16;
3126 tdep->num_ymm_avx512_regs = 16;
3127
3128 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3129 tdep->k0_regnum = AMD64_K0_REGNUM;
3130 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3131 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3132 }
3133
3134 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3135 {
3136 tdep->ymmh_register_names = amd64_ymmh_names;
3137 tdep->num_ymm_regs = 16;
3138 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3139 }
3140
3141 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3142 {
3143 tdep->mpx_register_names = amd64_mpx_names;
3144 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3145 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3146 }
3147
3148 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3149 {
3150 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3151 }
3152
3153 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3154 {
3155 tdep->pkeys_register_names = amd64_pkeys_names;
3156 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3157 tdep->num_pkeys_regs = 1;
3158 }
3159
3160 tdep->num_byte_regs = 20;
3161 tdep->num_word_regs = 16;
3162 tdep->num_dword_regs = 16;
3163 /* Avoid wiring in the MMX registers for now. */
3164 tdep->num_mmx_regs = 0;
3165
3166 set_gdbarch_pseudo_register_read_value (gdbarch,
3167 amd64_pseudo_register_read_value);
3168 set_gdbarch_pseudo_register_write (gdbarch,
3169 amd64_pseudo_register_write);
3170 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3171 amd64_ax_pseudo_register_collect);
3172
3173 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3174
3175 /* AMD64 has an FPU and 16 SSE registers. */
3176 tdep->st0_regnum = AMD64_ST0_REGNUM;
3177 tdep->num_xmm_regs = 16;
3178
3179 /* This is what all the fuss is about. */
3180 set_gdbarch_long_bit (gdbarch, 64);
3181 set_gdbarch_long_long_bit (gdbarch, 64);
3182 set_gdbarch_ptr_bit (gdbarch, 64);
3183
3184 /* In contrast to the i386, on AMD64 a `long double' actually takes
3185 up 128 bits, even though it's still based on the i387 extended
3186 floating-point format which has only 80 significant bits. */
3187 set_gdbarch_long_double_bit (gdbarch, 128);
3188
3189 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3190
3191 /* Register numbers of various important registers. */
3192 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3193 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3194 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3195 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3196
3197 /* The "default" register numbering scheme for AMD64 is referred to
3198 as the "DWARF Register Number Mapping" in the System V psABI.
3199 The preferred debugging format for all known AMD64 targets is
3200 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3201 DWARF-1), but we provide the same mapping just in case. This
3202 mapping is also used for stabs, which GCC does support. */
3203 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3204 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3205
3206 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3207 be in use on any of the supported AMD64 targets. */
3208
3209 /* Call dummy code. */
3210 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3211 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3212 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3213
3214 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3215 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3216 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3217
3218 set_gdbarch_return_value (gdbarch, amd64_return_value);
3219
3220 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3221
3222 tdep->record_regmap = amd64_record_regmap;
3223
3224 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3225
3226 /* Hook the function epilogue frame unwinder. This unwinder is
3227 prepended to the list, so that it supersedes the other
3228 unwinders in function epilogues. */
3229 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3230
3231 /* Hook the prologue-based frame unwinders. */
3232 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3233 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3234 frame_base_set_default (gdbarch, &amd64_frame_base);
3235
3236 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3237
3238 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3239
3240 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3241
3242 /* SystemTap variables and functions. */
3243 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3244 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3245 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3246 stap_register_indirection_prefixes);
3247 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3248 stap_register_indirection_suffixes);
3249 set_gdbarch_stap_is_single_operand (gdbarch,
3250 i386_stap_is_single_operand);
3251 set_gdbarch_stap_parse_special_token (gdbarch,
3252 i386_stap_parse_special_token);
3253 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3254 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3255 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3256
3257 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3258 amd64_in_indirect_branch_thunk);
3259 }
3260
3261 /* Initialize ARCH for x86-64, no osabi. */
3262
3263 static void
3264 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3265 {
3266 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3267 true));
3268 }
3269
3270 static struct type *
3271 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3272 {
3273 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3274
3275 switch (regnum - tdep->eax_regnum)
3276 {
3277 case AMD64_RBP_REGNUM: /* %ebp */
3278 case AMD64_RSP_REGNUM: /* %esp */
3279 return builtin_type (gdbarch)->builtin_data_ptr;
3280 case AMD64_RIP_REGNUM: /* %eip */
3281 return builtin_type (gdbarch)->builtin_func_ptr;
3282 }
3283
3284 return i386_pseudo_register_type (gdbarch, regnum);
3285 }
3286
3287 void
3288 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3289 const target_desc *default_tdesc)
3290 {
3291 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3292
3293 amd64_init_abi (info, gdbarch, default_tdesc);
3294
3295 tdep->num_dword_regs = 17;
3296 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3297
3298 set_gdbarch_long_bit (gdbarch, 32);
3299 set_gdbarch_ptr_bit (gdbarch, 32);
3300 }
3301
3302 /* Initialize ARCH for x64-32, no osabi. */
3303
3304 static void
3305 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3306 {
3307 amd64_x32_init_abi (info, arch,
3308 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3309 }
3310
3311 /* Return the target description for a specified XSAVE feature mask. */
3312
3313 const struct target_desc *
3314 amd64_target_description (uint64_t xcr0, bool segments)
3315 {
3316 static target_desc *amd64_tdescs \
3317 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3318 target_desc **tdesc;
3319
3320 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3321 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3322 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3323 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3324 [segments ? 1 : 0];
3325
3326 if (*tdesc == NULL)
3327 *tdesc = amd64_create_target_description (xcr0, false, false,
3328 segments);
3329
3330 return *tdesc;
3331 }
3332
3333 void _initialize_amd64_tdep ();
3334 void
3335 _initialize_amd64_tdep ()
3336 {
3337 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3338 amd64_none_init_abi);
3339 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3340 amd64_x32_none_init_abi);
3341 }
3342 \f
3343
3344 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3345 sense that the instruction pointer and data pointer are simply
3346 64-bit offsets into the code segment and the data segment instead
3347 of a selector/offset pair. The functions below store the upper 32
3348 bits of these pointers (instead of just the 16 bits of the segment
3349 selector). */
3350
3351 /* Fill register REGNUM in REGCACHE with the appropriate
3352 floating-point or SSE register value from *FXSAVE. If REGNUM is
3353 -1, do this for all registers. This function masks off any of the
3354 reserved bits in *FXSAVE. */
3355
3356 void
3357 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3358 const void *fxsave)
3359 {
3360 struct gdbarch *gdbarch = regcache->arch ();
3361 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3362
3363 i387_supply_fxsave (regcache, regnum, fxsave);
3364
3365 if (fxsave
3366 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3367 {
3368 const gdb_byte *regs = (const gdb_byte *) fxsave;
3369
3370 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3371 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3372 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3373 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3374 }
3375 }
3376
3377 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3378
3379 void
3380 amd64_supply_xsave (struct regcache *regcache, int regnum,
3381 const void *xsave)
3382 {
3383 struct gdbarch *gdbarch = regcache->arch ();
3384 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3385
3386 i387_supply_xsave (regcache, regnum, xsave);
3387
3388 if (xsave
3389 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3390 {
3391 const gdb_byte *regs = (const gdb_byte *) xsave;
3392 ULONGEST clear_bv;
3393
3394 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3395
3396 /* If the FISEG and FOSEG registers have not been initialised yet
3397 (their CLEAR_BV bit is set) then their default values of zero will
3398 have already been set up by I387_SUPPLY_XSAVE. */
3399 if (!(clear_bv & X86_XSTATE_X87))
3400 {
3401 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3402 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3403 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3404 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3405 }
3406 }
3407 }
3408
3409 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3410 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3411 all registers. This function doesn't touch any of the reserved
3412 bits in *FXSAVE. */
3413
3414 void
3415 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3416 void *fxsave)
3417 {
3418 struct gdbarch *gdbarch = regcache->arch ();
3419 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3420 gdb_byte *regs = (gdb_byte *) fxsave;
3421
3422 i387_collect_fxsave (regcache, regnum, fxsave);
3423
3424 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3425 {
3426 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3427 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3428 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3429 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3430 }
3431 }
3432
3433 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3434
3435 void
3436 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3437 void *xsave, int gcore)
3438 {
3439 struct gdbarch *gdbarch = regcache->arch ();
3440 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3441 gdb_byte *regs = (gdb_byte *) xsave;
3442
3443 i387_collect_xsave (regcache, regnum, xsave, gcore);
3444
3445 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3446 {
3447 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3448 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3449 regs + 12);
3450 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3451 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3452 regs + 20);
3453 }
3454 }