1 /* Target-dependent code for AMD64.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 Contributed by Jiri Smid, SuSE Labs.
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "opcode/i386.h"
26 #include "arch-utils.h"
28 #include "dummy-frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
40 #include "gdb_assert.h"
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
45 #include "features/i386/amd64.c"
47 /* Note that the AMD64 architecture was previously known as x86-64.
48 The latter is (forever) engraved into the canonical system name as
49 returned by config.guess, and used as the name for the AMD64 port
50 of GNU/Linux. The BSD's have renamed their ports to amd64; they
51 don't like to shout. For GDB we prefer the amd64_-prefix over the
52 x86_64_-prefix since it's so much easier to type. */
/* Register information.  */

/* Names of the raw registers, indexed by GDB register number.
   The extraction had dropped the array braces and the trailing
   "mxcsr" entry (register number 56); restored here.  */
static const char *amd64_register_names[] = 
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)
77 /* The registers used to pass integer arguments during a function call. */
78 static int amd64_dummy_call_integer_regs
[] =
80 AMD64_RDI_REGNUM
, /* %rdi */
81 AMD64_RSI_REGNUM
, /* %rsi */
82 AMD64_RDX_REGNUM
, /* %rdx */
83 AMD64_RCX_REGNUM
, /* %rcx */
/* NOTE(review): this table maps DWARF register numbers (System V psABI
   figure "DWARF Register Number Mapping") to GDB register numbers.  The
   extraction is damaged here: statements are split across lines and the
   entries under several section comments below (Frame Pointer, Stack
   Pointer, Return Address, Control/Status Flags, Selector, Segment Base,
   Special Selector, Floating Point Control) are missing entirely, as are
   the array braces.  Restore this table from the upstream GDB sources
   rather than editing it in place — the entries' positions ARE the DWARF
   numbers, so any omission shifts every later mapping.  */
88 /* DWARF Register Number Mapping as defined in the System V psABI,
91 static int amd64_dwarf_regmap
[] =
93 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
94 AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
,
95 AMD64_RCX_REGNUM
, AMD64_RBX_REGNUM
,
96 AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
98 /* Frame Pointer Register RBP. */
101 /* Stack Pointer Register RSP. */
104 /* Extended Integer Registers 8 - 15. */
105 8, 9, 10, 11, 12, 13, 14, 15,
107 /* Return Address RA. Mapped to RIP. */
110 /* SSE Registers 0 - 7. */
111 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
112 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
113 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
114 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
116 /* Extended SSE Registers 8 - 15. */
117 AMD64_XMM0_REGNUM
+ 8, AMD64_XMM0_REGNUM
+ 9,
118 AMD64_XMM0_REGNUM
+ 10, AMD64_XMM0_REGNUM
+ 11,
119 AMD64_XMM0_REGNUM
+ 12, AMD64_XMM0_REGNUM
+ 13,
120 AMD64_XMM0_REGNUM
+ 14, AMD64_XMM0_REGNUM
+ 15,
122 /* Floating Point Registers 0-7. */
123 AMD64_ST0_REGNUM
+ 0, AMD64_ST0_REGNUM
+ 1,
124 AMD64_ST0_REGNUM
+ 2, AMD64_ST0_REGNUM
+ 3,
125 AMD64_ST0_REGNUM
+ 4, AMD64_ST0_REGNUM
+ 5,
126 AMD64_ST0_REGNUM
+ 6, AMD64_ST0_REGNUM
+ 7,
128 /* Control and Status Flags Register. */
131 /* Selector Registers. */
141 /* Segment Base Address Registers. */
147 /* Special Selector Registers. */
151 /* Floating Point Control Registers. */
/* Number of entries in amd64_dwarf_regmap; bounds-checks DWARF regnums.  */
157 static const int amd64_dwarf_regmap_len
=
158 (sizeof (amd64_dwarf_regmap
) / sizeof (amd64_dwarf_regmap
[0]));
160 /* Convert DWARF register number REG to the appropriate register
161 number used by GDB. */
164 amd64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
168 if (reg
>= 0 && reg
< amd64_dwarf_regmap_len
)
169 regnum
= amd64_dwarf_regmap
[reg
];
172 warning (_("Unmapped DWARF Register #%d encountered."), reg
);
177 /* Map architectural register numbers to gdb register numbers. */
179 static const int amd64_arch_regmap
[16] =
181 AMD64_RAX_REGNUM
, /* %rax */
182 AMD64_RCX_REGNUM
, /* %rcx */
183 AMD64_RDX_REGNUM
, /* %rdx */
184 AMD64_RBX_REGNUM
, /* %rbx */
185 AMD64_RSP_REGNUM
, /* %rsp */
186 AMD64_RBP_REGNUM
, /* %rbp */
187 AMD64_RSI_REGNUM
, /* %rsi */
188 AMD64_RDI_REGNUM
, /* %rdi */
189 AMD64_R8_REGNUM
, /* %r8 */
190 AMD64_R9_REGNUM
, /* %r9 */
191 AMD64_R10_REGNUM
, /* %r10 */
192 AMD64_R11_REGNUM
, /* %r11 */
193 AMD64_R12_REGNUM
, /* %r12 */
194 AMD64_R13_REGNUM
, /* %r13 */
195 AMD64_R14_REGNUM
, /* %r14 */
196 AMD64_R15_REGNUM
/* %r15 */
199 static const int amd64_arch_regmap_len
=
200 (sizeof (amd64_arch_regmap
) / sizeof (amd64_arch_regmap
[0]));
202 /* Convert architectural register number REG to the appropriate register
203 number used by GDB. */
206 amd64_arch_reg_to_regnum (int reg
)
208 gdb_assert (reg
>= 0 && reg
< amd64_arch_regmap_len
);
210 return amd64_arch_regmap
[reg
];
/* Register names for byte pseudo-registers.
   Indices 0-15 are the low bytes of the 16 GPRs; 16-19 are the
   high-byte legacy registers %ah, %bh, %ch, %dh.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
/* Register names for word pseudo-registers.
   NOTE(review): entry 7 is deliberately the empty string in the
   original source — presumably to suppress a 16-bit alias for %rsp;
   confirm against upstream before changing.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "", 
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
/* Register names for dword pseudo-registers (32-bit views of the
   16 general-purpose registers).  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
};
241 /* Return the name of register REGNUM. */
244 amd64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
246 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
247 if (i386_byte_regnum_p (gdbarch
, regnum
))
248 return amd64_byte_names
[regnum
- tdep
->al_regnum
];
249 else if (i386_word_regnum_p (gdbarch
, regnum
))
250 return amd64_word_names
[regnum
- tdep
->ax_regnum
];
251 else if (i386_dword_regnum_p (gdbarch
, regnum
))
252 return amd64_dword_names
[regnum
- tdep
->eax_regnum
];
254 return i386_pseudo_register_name (gdbarch
, regnum
);
258 amd64_pseudo_register_read (struct gdbarch
*gdbarch
,
259 struct regcache
*regcache
,
260 int regnum
, gdb_byte
*buf
)
262 gdb_byte raw_buf
[MAX_REGISTER_SIZE
];
263 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
265 if (i386_byte_regnum_p (gdbarch
, regnum
))
267 int gpnum
= regnum
- tdep
->al_regnum
;
269 /* Extract (always little endian). */
270 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
272 /* Special handling for AH, BH, CH, DH. */
273 regcache_raw_read (regcache
,
274 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
, raw_buf
);
275 memcpy (buf
, raw_buf
+ 1, 1);
279 regcache_raw_read (regcache
, gpnum
, raw_buf
);
280 memcpy (buf
, raw_buf
, 1);
283 else if (i386_dword_regnum_p (gdbarch
, regnum
))
285 int gpnum
= regnum
- tdep
->eax_regnum
;
286 /* Extract (always little endian). */
287 regcache_raw_read (regcache
, gpnum
, raw_buf
);
288 memcpy (buf
, raw_buf
, 4);
291 i386_pseudo_register_read (gdbarch
, regcache
, regnum
, buf
);
295 amd64_pseudo_register_write (struct gdbarch
*gdbarch
,
296 struct regcache
*regcache
,
297 int regnum
, const gdb_byte
*buf
)
299 gdb_byte raw_buf
[MAX_REGISTER_SIZE
];
300 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
302 if (i386_byte_regnum_p (gdbarch
, regnum
))
304 int gpnum
= regnum
- tdep
->al_regnum
;
306 if (gpnum
>= AMD64_NUM_LOWER_BYTE_REGS
)
308 /* Read ... AH, BH, CH, DH. */
309 regcache_raw_read (regcache
,
310 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
, raw_buf
);
311 /* ... Modify ... (always little endian). */
312 memcpy (raw_buf
+ 1, buf
, 1);
314 regcache_raw_write (regcache
,
315 gpnum
- AMD64_NUM_LOWER_BYTE_REGS
, raw_buf
);
320 regcache_raw_read (regcache
, gpnum
, raw_buf
);
321 /* ... Modify ... (always little endian). */
322 memcpy (raw_buf
, buf
, 1);
324 regcache_raw_write (regcache
, gpnum
, raw_buf
);
327 else if (i386_dword_regnum_p (gdbarch
, regnum
))
329 int gpnum
= regnum
- tdep
->eax_regnum
;
332 regcache_raw_read (regcache
, gpnum
, raw_buf
);
333 /* ... Modify ... (always little endian). */
334 memcpy (raw_buf
, buf
, 4);
336 regcache_raw_write (regcache
, gpnum
, raw_buf
);
339 i386_pseudo_register_write (gdbarch
, regcache
, regnum
, buf
);
344 /* Return the union class of CLASS1 and CLASS2. See the psABI for
347 static enum amd64_reg_class
348 amd64_merge_classes (enum amd64_reg_class class1
, enum amd64_reg_class class2
)
350 /* Rule (a): If both classes are equal, this is the resulting class. */
351 if (class1
== class2
)
354 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
355 is the other class. */
356 if (class1
== AMD64_NO_CLASS
)
358 if (class2
== AMD64_NO_CLASS
)
361 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
362 if (class1
== AMD64_MEMORY
|| class2
== AMD64_MEMORY
)
365 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
366 if (class1
== AMD64_INTEGER
|| class2
== AMD64_INTEGER
)
367 return AMD64_INTEGER
;
369 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
370 MEMORY is used as class. */
371 if (class1
== AMD64_X87
|| class1
== AMD64_X87UP
372 || class1
== AMD64_COMPLEX_X87
|| class2
== AMD64_X87
373 || class2
== AMD64_X87UP
|| class2
== AMD64_COMPLEX_X87
)
376 /* Rule (f): Otherwise class SSE is used. */
380 /* Return non-zero if TYPE is a non-POD structure or union type. */
383 amd64_non_pod_p (struct type
*type
)
385 /* ??? A class with a base class certainly isn't POD, but does this
386 catch all non-POD structure types? */
387 if (TYPE_CODE (type
) == TYPE_CODE_STRUCT
&& TYPE_N_BASECLASSES (type
) > 0)
/* NOTE(review): classifies an aggregate (struct/array/union) TYPE into
   the psABI's two eight-byte register classes, writing the result into
   CLASS[2].  The extraction is damaged here: statements are split across
   lines and several control-flow lines are missing (the return type and
   braces, the early `return' after the MEMORY case, the array-case
   `class[1] = class[0]' assignment, the declaration of `i'/`endpos',
   and the condition guarding the final subclass[1] merge).  Restore from
   the upstream GDB sources rather than editing in place — the field
   straddling logic below is order-sensitive.  */
393 /* Classify TYPE according to the rules for aggregate (structures and
394 arrays) and union types, and store the result in CLASS. */
397 amd64_classify_aggregate (struct type
*type
, enum amd64_reg_class
class[2])
399 int len
= TYPE_LENGTH (type
);
401 /* 1. If the size of an object is larger than two eightbytes, or in
402 C++, is a non-POD structure or union type, or contains
403 unaligned fields, it has class memory. */
404 if (len
> 16 || amd64_non_pod_p (type
))
406 class[0] = class[1] = AMD64_MEMORY
;
410 /* 2. Both eightbytes get initialized to class NO_CLASS. */
411 class[0] = class[1] = AMD64_NO_CLASS
;
413 /* 3. Each field of an object is classified recursively so that
414 always two fields are considered. The resulting class is
415 calculated according to the classes of the fields in the
418 if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
)
420 struct type
*subtype
= check_typedef (TYPE_TARGET_TYPE (type
));
422 /* All fields in an array have the same type. */
423 amd64_classify (subtype
, class);
424 if (len
> 8 && class[1] == AMD64_NO_CLASS
)
431 /* Structure or union. */
432 gdb_assert (TYPE_CODE (type
) == TYPE_CODE_STRUCT
433 || TYPE_CODE (type
) == TYPE_CODE_UNION
);
435 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
437 struct type
*subtype
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
438 int pos
= TYPE_FIELD_BITPOS (type
, i
) / 64;
439 enum amd64_reg_class subclass
[2];
440 int bitsize
= TYPE_FIELD_BITSIZE (type
, i
);
444 bitsize
= TYPE_LENGTH (subtype
) * 8;
445 endpos
= (TYPE_FIELD_BITPOS (type
, i
) + bitsize
- 1) / 64;
447 /* Ignore static fields. */
448 if (field_is_static (&TYPE_FIELD (type
, i
)))
451 gdb_assert (pos
== 0 || pos
== 1);
453 amd64_classify (subtype
, subclass
);
454 class[pos
] = amd64_merge_classes (class[pos
], subclass
[0]);
455 if (bitsize
<= 64 && pos
== 0 && endpos
== 1)
456 /* This is a bit of an odd case: We have a field that would
457 normally fit in one of the two eightbytes, except that
458 it is placed in a way that this field straddles them.
459 This has been seen with a structure containing an array.
461 The ABI is a bit unclear in this case, but we assume that
462 this field's class (stored in subclass[0]) must also be merged
463 into class[1]. In other words, our field has a piece stored
464 in the second eight-byte, and thus its class applies to
465 the second eight-byte as well.
467 In the case where the field length exceeds 8 bytes,
468 it should not be necessary to merge the field class
469 into class[1]. As LEN > 8, subclass[1] is necessarily
470 different from AMD64_NO_CLASS. If subclass[1] is equal
471 to subclass[0], then the normal class[1]/subclass[1]
472 merging will take care of everything. For subclass[1]
473 to be different from subclass[0], I can only see the case
474 where we have a SSE/SSEUP or X87/X87UP pair, which both
475 use up all 16 bytes of the aggregate, and are already
476 handled just fine (because each portion sits on its own
478 class[1] = amd64_merge_classes (class[1], subclass
[0]);
480 class[1] = amd64_merge_classes (class[1], subclass
[1]);
484 /* 4. Then a post merger cleanup is done: */
486 /* Rule (a): If one of the classes is MEMORY, the whole argument is
488 if (class[0] == AMD64_MEMORY
|| class[1] == AMD64_MEMORY
)
489 class[0] = class[1] = AMD64_MEMORY
;
491 /* Rule (b): If SSEUP is not preceeded by SSE, it is converted to
493 if (class[0] == AMD64_SSEUP
)
494 class[0] = AMD64_SSE
;
495 if (class[1] == AMD64_SSEUP
&& class[0] != AMD64_SSE
)
496 class[1] = AMD64_SSE
;
499 /* Classify TYPE, and store the result in CLASS. */
502 amd64_classify (struct type
*type
, enum amd64_reg_class
class[2])
504 enum type_code code
= TYPE_CODE (type
);
505 int len
= TYPE_LENGTH (type
);
507 class[0] = class[1] = AMD64_NO_CLASS
;
509 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
510 long, long long, and pointers are in the INTEGER class. Similarly,
511 range types, used by languages such as Ada, are also in the INTEGER
513 if ((code
== TYPE_CODE_INT
|| code
== TYPE_CODE_ENUM
514 || code
== TYPE_CODE_BOOL
|| code
== TYPE_CODE_RANGE
515 || code
== TYPE_CODE_CHAR
516 || code
== TYPE_CODE_PTR
|| code
== TYPE_CODE_REF
)
517 && (len
== 1 || len
== 2 || len
== 4 || len
== 8))
518 class[0] = AMD64_INTEGER
;
520 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
522 else if ((code
== TYPE_CODE_FLT
|| code
== TYPE_CODE_DECFLOAT
)
523 && (len
== 4 || len
== 8))
525 class[0] = AMD64_SSE
;
527 /* Arguments of types __float128, _Decimal128 and __m128 are split into
528 two halves. The least significant ones belong to class SSE, the most
529 significant one to class SSEUP. */
530 else if (code
== TYPE_CODE_DECFLOAT
&& len
== 16)
531 /* FIXME: __float128, __m128. */
532 class[0] = AMD64_SSE
, class[1] = AMD64_SSEUP
;
534 /* The 64-bit mantissa of arguments of type long double belongs to
535 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
537 else if (code
== TYPE_CODE_FLT
&& len
== 16)
538 /* Class X87 and X87UP. */
539 class[0] = AMD64_X87
, class[1] = AMD64_X87UP
;
542 else if (code
== TYPE_CODE_ARRAY
|| code
== TYPE_CODE_STRUCT
543 || code
== TYPE_CODE_UNION
)
544 amd64_classify_aggregate (type
, class);
/* NOTE(review): gdbarch return_value hook — reads or writes a function
   return value of TYPE in REGCACHE per the psABI classification
   (tdep->classify).  Exactly one of READBUF/WRITEBUF is non-NULL
   (asserted below).  The extraction is damaged here: the braces, the
   local declarations (loop counters, regnum/offset, the MEMORY-case
   `addr'), the switch/case skeleton around register classes, and the
   readbuf/writebuf guards are missing.  Restore this function from the
   upstream GDB sources rather than editing it in place.  */
547 static enum return_value_convention
548 amd64_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
549 struct type
*type
, struct regcache
*regcache
,
550 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
552 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
553 enum amd64_reg_class
class[2];
554 int len
= TYPE_LENGTH (type
);
555 static int integer_regnum
[] = { AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
};
556 static int sse_regnum
[] = { AMD64_XMM0_REGNUM
, AMD64_XMM1_REGNUM
};
561 gdb_assert (!(readbuf
&& writebuf
));
562 gdb_assert (tdep
->classify
);
564 /* 1. Classify the return type with the classification algorithm. */
565 tdep
->classify (type
, class);
567 /* 2. If the type has class MEMORY, then the caller provides space
568 for the return value and passes the address of this storage in
569 %rdi as if it were the first argument to the function. In effect,
570 this address becomes a hidden first argument.
572 On return %rax will contain the address that has been passed in
573 by the caller in %rdi. */
574 if (class[0] == AMD64_MEMORY
)
576 /* As indicated by the comment above, the ABI guarantees that we
577 can always find the return value just after the function has
584 regcache_raw_read_unsigned (regcache
, AMD64_RAX_REGNUM
, &addr
);
585 read_memory (addr
, readbuf
, TYPE_LENGTH (type
));
588 return RETURN_VALUE_ABI_RETURNS_ADDRESS
;
591 gdb_assert (class[1] != AMD64_MEMORY
);
592 gdb_assert (len
<= 16);
594 for (i
= 0; len
> 0; i
++, len
-= 8)
602 /* 3. If the class is INTEGER, the next available register
603 of the sequence %rax, %rdx is used. */
604 regnum
= integer_regnum
[integer_reg
++];
608 /* 4. If the class is SSE, the next available SSE register
609 of the sequence %xmm0, %xmm1 is used. */
610 regnum
= sse_regnum
[sse_reg
++];
614 /* 5. If the class is SSEUP, the eightbyte is passed in the
615 upper half of the last used SSE register. */
616 gdb_assert (sse_reg
> 0);
617 regnum
= sse_regnum
[sse_reg
- 1];
622 /* 6. If the class is X87, the value is returned on the X87
623 stack in %st0 as 80-bit x87 number. */
624 regnum
= AMD64_ST0_REGNUM
;
626 i387_return_value (gdbarch
, regcache
);
630 /* 7. If the class is X87UP, the value is returned together
631 with the previous X87 value in %st0. */
632 gdb_assert (i
> 0 && class[0] == AMD64_X87
);
633 regnum
= AMD64_ST0_REGNUM
;
642 gdb_assert (!"Unexpected register class.");
645 gdb_assert (regnum
!= -1);
648 regcache_raw_read_part (regcache
, regnum
, offset
, min (len
, 8),
651 regcache_raw_write_part (regcache
, regnum
, offset
, min (len
, 8),
655 return RETURN_VALUE_REGISTER_CONVENTION
;
/* NOTE(review): pushes the NARGS call arguments in ARGS for an inferior
   function call: register-class arguments go into the integer/SSE
   argument registers (taken from tdep so OS variants can differ),
   everything else is written to the stack at SP; returns the adjusted
   SP (the trailing `return' was lost in extraction).  The extraction is
   damaged throughout: braces, several local declarations (i, j, element,
   integer_reg, sse_reg, buf, regnum/offset), the `if (struct_return)'
   guard for the hidden argument, the switch/case skeleton in the
   register-passing loop, and the 16-byte alignment statement under the
   psABI comment are all missing.  Restore from the upstream GDB sources
   rather than editing in place.  */
660 amd64_push_arguments (struct regcache
*regcache
, int nargs
,
661 struct value
**args
, CORE_ADDR sp
, int struct_return
)
663 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
664 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
665 int *integer_regs
= tdep
->call_dummy_integer_regs
;
666 int num_integer_regs
= tdep
->call_dummy_num_integer_regs
;
668 static int sse_regnum
[] =
670 /* %xmm0 ... %xmm7 */
671 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
672 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
673 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
674 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
676 struct value
**stack_args
= alloca (nargs
* sizeof (struct value
*));
677 /* An array that mirrors the stack_args array. For all arguments
678 that are passed by MEMORY, if that argument's address also needs
679 to be stored in a register, the ARG_ADDR_REGNO array will contain
680 that register number (or a negative value otherwise). */
681 int *arg_addr_regno
= alloca (nargs
* sizeof (int));
682 int num_stack_args
= 0;
683 int num_elements
= 0;
689 gdb_assert (tdep
->classify
);
691 /* Reserve a register for the "hidden" argument. */
695 for (i
= 0; i
< nargs
; i
++)
697 struct type
*type
= value_type (args
[i
]);
698 int len
= TYPE_LENGTH (type
);
699 enum amd64_reg_class
class[2];
700 int needed_integer_regs
= 0;
701 int needed_sse_regs
= 0;
704 /* Classify argument. */
705 tdep
->classify (type
, class);
707 /* Calculate the number of integer and SSE registers needed for
709 for (j
= 0; j
< 2; j
++)
711 if (class[j
] == AMD64_INTEGER
)
712 needed_integer_regs
++;
713 else if (class[j
] == AMD64_SSE
)
717 /* Check whether enough registers are available, and if the
718 argument should be passed in registers at all. */
719 if (integer_reg
+ needed_integer_regs
> num_integer_regs
720 || sse_reg
+ needed_sse_regs
> ARRAY_SIZE (sse_regnum
)
721 || (needed_integer_regs
== 0 && needed_sse_regs
== 0))
723 /* The argument will be passed on the stack. */
724 num_elements
+= ((len
+ 7) / 8);
725 stack_args
[num_stack_args
] = args
[i
];
726 /* If this is an AMD64_MEMORY argument whose address must also
727 be passed in one of the integer registers, reserve that
728 register and associate this value to that register so that
729 we can store the argument address as soon as we know it. */
730 if (class[0] == AMD64_MEMORY
731 && tdep
->memory_args_by_pointer
732 && integer_reg
< tdep
->call_dummy_num_integer_regs
)
733 arg_addr_regno
[num_stack_args
] =
734 tdep
->call_dummy_integer_regs
[integer_reg
++];
736 arg_addr_regno
[num_stack_args
] = -1;
741 /* The argument will be passed in registers. */
742 const gdb_byte
*valbuf
= value_contents (args
[i
]);
745 gdb_assert (len
<= 16);
747 for (j
= 0; len
> 0; j
++, len
-= 8)
755 regnum
= integer_regs
[integer_reg
++];
759 regnum
= sse_regnum
[sse_reg
++];
763 gdb_assert (sse_reg
> 0);
764 regnum
= sse_regnum
[sse_reg
- 1];
769 gdb_assert (!"Unexpected register class.");
772 gdb_assert (regnum
!= -1);
773 memset (buf
, 0, sizeof buf
);
774 memcpy (buf
, valbuf
+ j
* 8, min (len
, 8));
775 regcache_raw_write_part (regcache
, regnum
, offset
, 8, buf
);
780 /* Allocate space for the arguments on the stack. */
781 sp
-= num_elements
* 8;
783 /* The psABI says that "The end of the input argument area shall be
784 aligned on a 16 byte boundary." */
787 /* Write out the arguments to the stack. */
788 for (i
= 0; i
< num_stack_args
; i
++)
790 struct type
*type
= value_type (stack_args
[i
]);
791 const gdb_byte
*valbuf
= value_contents (stack_args
[i
]);
792 int len
= TYPE_LENGTH (type
);
793 CORE_ADDR arg_addr
= sp
+ element
* 8;
795 write_memory (arg_addr
, valbuf
, len
);
796 if (arg_addr_regno
[i
] >= 0)
798 /* We also need to store the address of that argument in
799 the given register. */
801 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
803 store_unsigned_integer (buf
, 8, byte_order
, arg_addr
);
804 regcache_cooked_write (regcache
, arg_addr_regno
[i
], buf
);
806 element
+= ((len
+ 7) / 8);
809 /* The psABI says that "For calls that may call functions that use
810 varargs or stdargs (prototype-less calls or calls to functions
811 containing ellipsis (...) in the declaration) %al is used as
812 hidden argument to specify the number of SSE registers used. */
813 regcache_raw_write_unsigned (regcache
, AMD64_RAX_REGNUM
, sse_reg
);
/* NOTE(review): gdbarch push_dummy_call hook — sets up the inferior
   stack and registers for a dummy call: pushes arguments via
   amd64_push_arguments, stores the hidden struct-return address in the
   first argument register when STRUCT_RETURN, pushes the return address
   BP_ADDR, and writes the final SP into %rsp (and %rbp as a fake frame
   pointer); returns SP.  The extraction is damaged: the return type,
   braces, the `gdb_byte buf[8]' declarations, the `if (struct_return)'
   guard, the `sp -= 8' before storing the return address, and the final
   `return sp' are missing.  Restore from the upstream GDB sources
   rather than editing in place.  */
818 amd64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
819 struct regcache
*regcache
, CORE_ADDR bp_addr
,
820 int nargs
, struct value
**args
, CORE_ADDR sp
,
821 int struct_return
, CORE_ADDR struct_addr
)
823 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
824 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
827 /* Pass arguments. */
828 sp
= amd64_push_arguments (regcache
, nargs
, args
, sp
, struct_return
);
830 /* Pass "hidden" argument". */
833 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
834 /* The "hidden" argument is passed throught the first argument
836 const int arg_regnum
= tdep
->call_dummy_integer_regs
[0];
838 store_unsigned_integer (buf
, 8, byte_order
, struct_addr
);
839 regcache_cooked_write (regcache
, arg_regnum
, buf
);
842 /* Reserve some memory on the stack for the integer-parameter registers,
843 if required by the ABI. */
844 if (tdep
->integer_param_regs_saved_in_caller_frame
)
845 sp
-= tdep
->call_dummy_num_integer_regs
* 8;
847 /* Store return address. */
849 store_unsigned_integer (buf
, 8, byte_order
, bp_addr
);
850 write_memory (sp
, buf
, 8);
852 /* Finally, update the stack pointer... */
853 store_unsigned_integer (buf
, 8, byte_order
, sp
);
854 regcache_cooked_write (regcache
, AMD64_RSP_REGNUM
, buf
);
856 /* ...and fake a frame pointer. */
857 regcache_cooked_write (regcache
, AMD64_RBP_REGNUM
, buf
);
862 /* Displaced instruction handling. */
864 /* A partially decoded instruction.
865 This contains enough details for displaced stepping purposes. */
869 /* The number of opcode bytes. */
871 /* The offset of the rex prefix or -1 if not present. */
873 /* The offset to the first opcode byte. */
875 /* The offset to the modrm byte or -1 if not present. */
878 /* The raw instruction. */
882 struct displaced_step_closure
884 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
889 /* Details of the instruction. */
890 struct amd64_insn insn_details
;
892 /* Amount of space allocated to insn_buf. */
895 /* The possibly modified insn.
896 This is a variable-length field. */
897 gdb_byte insn_buf
[1];
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
/* Whether each 0x0f-escaped (two-byte) opcode takes a ModRM byte,
   indexed by the second opcode byte.  Keep in sync with
   ../opcodes/i386-dis.c (see warning above onebyte_has_modrm).  */
static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
950 static int amd64_syscall_p (const struct amd64_insn
*insn
, int *lengthp
);
953 rex_prefix_p (gdb_byte pfx
)
955 return REX_PREFIX_P (pfx
);
958 /* Skip the legacy instruction prefixes in INSN.
959 We assume INSN is properly sentineled so we don't have to worry
960 about falling off the end of the buffer. */
963 amd64_skip_prefixes (gdb_byte
*insn
)
969 case DATA_PREFIX_OPCODE
:
970 case ADDR_PREFIX_OPCODE
:
971 case CS_PREFIX_OPCODE
:
972 case DS_PREFIX_OPCODE
:
973 case ES_PREFIX_OPCODE
:
974 case FS_PREFIX_OPCODE
:
975 case GS_PREFIX_OPCODE
:
976 case SS_PREFIX_OPCODE
:
977 case LOCK_PREFIX_OPCODE
:
978 case REPE_PREFIX_OPCODE
:
979 case REPNE_PREFIX_OPCODE
:
991 /* fprintf-function for amd64_insn_length.
992 This function is a nop, we don't want to print anything, we just want to
993 compute the length of the insn. */
995 static int ATTR_FORMAT (printf
, 2, 3)
996 amd64_insn_length_fprintf (void *stream
, const char *format
, ...)
1001 /* Initialize a struct disassemble_info for amd64_insn_length. */
1004 amd64_insn_length_init_dis (struct gdbarch
*gdbarch
,
1005 struct disassemble_info
*di
,
1006 const gdb_byte
*insn
, int max_len
,
1009 init_disassemble_info (di
, NULL
, amd64_insn_length_fprintf
);
1011 /* init_disassemble_info installs buffer_read_memory, etc.
1012 so we don't need to do that here.
1013 The cast is necessary until disassemble_info is const-ified. */
1014 di
->buffer
= (gdb_byte
*) insn
;
1015 di
->buffer_length
= max_len
;
1016 di
->buffer_vma
= addr
;
1018 di
->arch
= gdbarch_bfd_arch_info (gdbarch
)->arch
;
1019 di
->mach
= gdbarch_bfd_arch_info (gdbarch
)->mach
;
1020 di
->endian
= gdbarch_byte_order (gdbarch
);
1021 di
->endian_code
= gdbarch_byte_order_for_code (gdbarch
);
1023 disassemble_init_for_target (di
);
1026 /* Return the length in bytes of INSN.
1027 MAX_LEN is the size of the buffer containing INSN.
1028 libopcodes currently doesn't export a utility to compute the
1029 instruction length, so use the disassembler until then. */
1032 amd64_insn_length (struct gdbarch
*gdbarch
,
1033 const gdb_byte
*insn
, int max_len
, CORE_ADDR addr
)
1035 struct disassemble_info di
;
1037 amd64_insn_length_init_dis (gdbarch
, &di
, insn
, max_len
, addr
);
1039 return gdbarch_print_insn (gdbarch
, addr
, &di
);
1042 /* Return an integer register (other than RSP) that is unused as an input
1044 In order to not require adding a rex prefix if the insn doesn't already
1045 have one, the result is restricted to RAX ... RDI, sans RSP.
1046 The register numbering of the result follows architecture ordering,
1050 amd64_get_unused_input_int_reg (const struct amd64_insn
*details
)
1052 /* 1 bit for each reg */
1053 int used_regs_mask
= 0;
1055 /* There can be at most 3 int regs used as inputs in an insn, and we have
1056 7 to choose from (RAX ... RDI, sans RSP).
1057 This allows us to take a conservative approach and keep things simple.
1058 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1059 that implicitly specify RAX. */
1062 used_regs_mask
|= 1 << EAX_REG_NUM
;
1063 /* Similarily avoid RDX, implicit operand in divides. */
1064 used_regs_mask
|= 1 << EDX_REG_NUM
;
1066 used_regs_mask
|= 1 << ESP_REG_NUM
;
1068 /* If the opcode is one byte long and there's no ModRM byte,
1069 assume the opcode specifies a register. */
1070 if (details
->opcode_len
== 1 && details
->modrm_offset
== -1)
1071 used_regs_mask
|= 1 << (details
->raw_insn
[details
->opcode_offset
] & 7);
1073 /* Mark used regs in the modrm/sib bytes. */
1074 if (details
->modrm_offset
!= -1)
1076 int modrm
= details
->raw_insn
[details
->modrm_offset
];
1077 int mod
= MODRM_MOD_FIELD (modrm
);
1078 int reg
= MODRM_REG_FIELD (modrm
);
1079 int rm
= MODRM_RM_FIELD (modrm
);
1080 int have_sib
= mod
!= 3 && rm
== 4;
1082 /* Assume the reg field of the modrm byte specifies a register. */
1083 used_regs_mask
|= 1 << reg
;
1087 int base
= SIB_BASE_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1088 int index
= SIB_INDEX_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
1089 used_regs_mask
|= 1 << base
;
1090 used_regs_mask
|= 1 << index
;
1094 used_regs_mask
|= 1 << rm
;
1098 gdb_assert (used_regs_mask
< 256);
1099 gdb_assert (used_regs_mask
!= 255);
1101 /* Finally, find a free reg. */
1105 for (i
= 0; i
< 8; ++i
)
1107 if (! (used_regs_mask
& (1 << i
)))
1111 /* We shouldn't get here. */
1112 internal_error (__FILE__
, __LINE__
, _("unable to find free reg"));
/* NOTE(review): partially decodes INSN (prefixes, REX, opcode length,
   ModRM offset) into DETAILS for displaced stepping.  The extraction is
   damaged: the return type, braces, the `need_modrm' declaration, the
   `++insn' advances, the `else' keyword, the `if (need_modrm)' guard,
   and — critically — the switch distinguishing three-byte opcodes from
   two-byte ones (between the `opcode_len = 3' and `= 2' assignments
   below) are all missing; the specific escape byte values cannot be
   recovered from this chunk.  Restore from the upstream GDB sources
   rather than editing in place.  */
1116 /* Extract the details of INSN that we need. */
1119 amd64_get_insn_details (gdb_byte
*insn
, struct amd64_insn
*details
)
1121 gdb_byte
*start
= insn
;
1124 details
->raw_insn
= insn
;
1126 details
->opcode_len
= -1;
1127 details
->rex_offset
= -1;
1128 details
->opcode_offset
= -1;
1129 details
->modrm_offset
= -1;
1131 /* Skip legacy instruction prefixes. */
1132 insn
= amd64_skip_prefixes (insn
);
1134 /* Skip REX instruction prefix. */
1135 if (rex_prefix_p (*insn
))
1137 details
->rex_offset
= insn
- start
;
1141 details
->opcode_offset
= insn
- start
;
1143 if (*insn
== TWO_BYTE_OPCODE_ESCAPE
)
1145 /* Two or three-byte opcode. */
1147 need_modrm
= twobyte_has_modrm
[*insn
];
1149 /* Check for three-byte opcode. */
1159 details
->opcode_len
= 3;
1162 details
->opcode_len
= 2;
1168 /* One-byte opcode. */
1169 need_modrm
= onebyte_has_modrm
[*insn
];
1170 details
->opcode_len
= 1;
1176 details
->modrm_offset
= insn
- start
;
1180 /* Update %rip-relative addressing in INSN.
1182 %rip-relative addressing only uses a 32-bit displacement.
1183 32 bits is not enough to be guaranteed to cover the distance between where
1184 the real instruction is and where its copy is.
1185 Convert the insn to use base+disp addressing.
1186 We set base = pc + insn_length so we can leave disp unchanged. */
1189 fixup_riprel (struct gdbarch
*gdbarch
, struct displaced_step_closure
*dsc
,
1190 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1192 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1193 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1194 int modrm_offset
= insn_details
->modrm_offset
;
1195 gdb_byte
*insn
= insn_details
->raw_insn
+ modrm_offset
;
1199 int arch_tmp_regno
, tmp_regno
;
1200 ULONGEST orig_value
;
1202 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1205 /* Compute the rip-relative address. */
1206 disp
= extract_signed_integer (insn
, sizeof (int32_t), byte_order
);
1207 insn_length
= amd64_insn_length (gdbarch
, dsc
->insn_buf
, dsc
->max_len
, from
);
1208 rip_base
= from
+ insn_length
;
1210 /* We need a register to hold the address.
1211 Pick one not used in the insn.
1212 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1213 arch_tmp_regno
= amd64_get_unused_input_int_reg (insn_details
);
1214 tmp_regno
= amd64_arch_reg_to_regnum (arch_tmp_regno
);
1216 /* REX.B should be unset as we were using rip-relative addressing,
1217 but ensure it's unset anyway, tmp_regno is not r8-r15. */
1218 if (insn_details
->rex_offset
!= -1)
1219 dsc
->insn_buf
[insn_details
->rex_offset
] &= ~REX_B
;
1221 regcache_cooked_read_unsigned (regs
, tmp_regno
, &orig_value
);
1222 dsc
->tmp_regno
= tmp_regno
;
1223 dsc
->tmp_save
= orig_value
;
1226 /* Convert the ModRM field to be base+disp. */
1227 dsc
->insn_buf
[modrm_offset
] &= ~0xc7;
1228 dsc
->insn_buf
[modrm_offset
] |= 0x80 + arch_tmp_regno
;
1230 regcache_cooked_write_unsigned (regs
, tmp_regno
, rip_base
);
1232 if (debug_displaced
)
1233 fprintf_unfiltered (gdb_stdlog
, "displaced: %%rip-relative addressing used.\n"
1234 "displaced: using temp reg %d, old value %s, new value %s\n",
1235 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
),
1236 paddress (gdbarch
, rip_base
));
1240 fixup_displaced_copy (struct gdbarch
*gdbarch
,
1241 struct displaced_step_closure
*dsc
,
1242 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1244 const struct amd64_insn
*details
= &dsc
->insn_details
;
1246 if (details
->modrm_offset
!= -1)
1248 gdb_byte modrm
= details
->raw_insn
[details
->modrm_offset
];
1250 if ((modrm
& 0xc7) == 0x05)
1252 /* The insn uses rip-relative addressing.
1254 fixup_riprel (gdbarch
, dsc
, from
, to
, regs
);
1259 struct displaced_step_closure
*
1260 amd64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
1261 CORE_ADDR from
, CORE_ADDR to
,
1262 struct regcache
*regs
)
1264 int len
= gdbarch_max_insn_length (gdbarch
);
1265 /* Extra space for sentinels so fixup_{riprel,displaced_copy don't have to
1266 continually watch for running off the end of the buffer. */
1267 int fixup_sentinel_space
= len
;
1268 struct displaced_step_closure
*dsc
=
1269 xmalloc (sizeof (*dsc
) + len
+ fixup_sentinel_space
);
1270 gdb_byte
*buf
= &dsc
->insn_buf
[0];
1271 struct amd64_insn
*details
= &dsc
->insn_details
;
1274 dsc
->max_len
= len
+ fixup_sentinel_space
;
1276 read_memory (from
, buf
, len
);
1278 /* Set up the sentinel space so we don't have to worry about running
1279 off the end of the buffer. An excessive number of leading prefixes
1280 could otherwise cause this. */
1281 memset (buf
+ len
, 0, fixup_sentinel_space
);
1283 amd64_get_insn_details (buf
, details
);
1285 /* GDB may get control back after the insn after the syscall.
1286 Presumably this is a kernel bug.
1287 If this is a syscall, make sure there's a nop afterwards. */
1291 if (amd64_syscall_p (details
, &syscall_length
))
1292 buf
[details
->opcode_offset
+ syscall_length
] = NOP_OPCODE
;
1295 /* Modify the insn to cope with the address where it will be executed from.
1296 In particular, handle any rip-relative addressing. */
1297 fixup_displaced_copy (gdbarch
, dsc
, from
, to
, regs
);
1299 write_memory (to
, buf
, len
);
1301 if (debug_displaced
)
1303 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
1304 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
1305 displaced_step_dump_bytes (gdb_stdlog
, buf
, len
);
1312 amd64_absolute_jmp_p (const struct amd64_insn
*details
)
1314 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1316 if (insn
[0] == 0xff)
1318 /* jump near, absolute indirect (/4) */
1319 if ((insn
[1] & 0x38) == 0x20)
1322 /* jump far, absolute indirect (/5) */
1323 if ((insn
[1] & 0x38) == 0x28)
1331 amd64_absolute_call_p (const struct amd64_insn
*details
)
1333 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1335 if (insn
[0] == 0xff)
1337 /* Call near, absolute indirect (/2) */
1338 if ((insn
[1] & 0x38) == 0x10)
1341 /* Call far, absolute indirect (/3) */
1342 if ((insn
[1] & 0x38) == 0x18)
1350 amd64_ret_p (const struct amd64_insn
*details
)
1352 /* NOTE: gcc can emit "repz ; ret". */
1353 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1357 case 0xc2: /* ret near, pop N bytes */
1358 case 0xc3: /* ret near */
1359 case 0xca: /* ret far, pop N bytes */
1360 case 0xcb: /* ret far */
1361 case 0xcf: /* iret */
1370 amd64_call_p (const struct amd64_insn
*details
)
1372 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1374 if (amd64_absolute_call_p (details
))
1377 /* call near, relative */
1378 if (insn
[0] == 0xe8)
1384 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1385 length in bytes. Otherwise, return zero. */
1388 amd64_syscall_p (const struct amd64_insn
*details
, int *lengthp
)
1390 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1392 if (insn
[0] == 0x0f && insn
[1] == 0x05)
1401 /* Fix up the state of registers and memory after having single-stepped
1402 a displaced instruction. */
1405 amd64_displaced_step_fixup (struct gdbarch
*gdbarch
,
1406 struct displaced_step_closure
*dsc
,
1407 CORE_ADDR from
, CORE_ADDR to
,
1408 struct regcache
*regs
)
1410 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1411 /* The offset we applied to the instruction's address. */
1412 ULONGEST insn_offset
= to
- from
;
1413 gdb_byte
*insn
= dsc
->insn_buf
;
1414 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1416 if (debug_displaced
)
1417 fprintf_unfiltered (gdb_stdlog
,
1418 "displaced: fixup (%s, %s), "
1419 "insn = 0x%02x 0x%02x ...\n",
1420 paddress (gdbarch
, from
), paddress (gdbarch
, to
),
1423 /* If we used a tmp reg, restore it. */
1427 if (debug_displaced
)
1428 fprintf_unfiltered (gdb_stdlog
, "displaced: restoring reg %d to %s\n",
1429 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
));
1430 regcache_cooked_write_unsigned (regs
, dsc
->tmp_regno
, dsc
->tmp_save
);
1433 /* The list of issues to contend with here is taken from
1434 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1435 Yay for Free Software! */
1437 /* Relocate the %rip back to the program's instruction stream,
1440 /* Except in the case of absolute or indirect jump or call
1441 instructions, or a return instruction, the new rip is relative to
1442 the displaced instruction; make it relative to the original insn.
1443 Well, signal handler returns don't need relocation either, but we use the
1444 value of %rip to recognize those; see below. */
1445 if (! amd64_absolute_jmp_p (insn_details
)
1446 && ! amd64_absolute_call_p (insn_details
)
1447 && ! amd64_ret_p (insn_details
))
1452 regcache_cooked_read_unsigned (regs
, AMD64_RIP_REGNUM
, &orig_rip
);
1454 /* A signal trampoline system call changes the %rip, resuming
1455 execution of the main program after the signal handler has
1456 returned. That makes them like 'return' instructions; we
1457 shouldn't relocate %rip.
1459 But most system calls don't, and we do need to relocate %rip.
1461 Our heuristic for distinguishing these cases: if stepping
1462 over the system call instruction left control directly after
1463 the instruction, the we relocate --- control almost certainly
1464 doesn't belong in the displaced copy. Otherwise, we assume
1465 the instruction has put control where it belongs, and leave
1466 it unrelocated. Goodness help us if there are PC-relative
1468 if (amd64_syscall_p (insn_details
, &insn_len
)
1469 && orig_rip
!= to
+ insn_len
1470 /* GDB can get control back after the insn after the syscall.
1471 Presumably this is a kernel bug.
1472 Fixup ensures its a nop, we add one to the length for it. */
1473 && orig_rip
!= to
+ insn_len
+ 1)
1475 if (debug_displaced
)
1476 fprintf_unfiltered (gdb_stdlog
,
1477 "displaced: syscall changed %%rip; "
1478 "not relocating\n");
1482 ULONGEST rip
= orig_rip
- insn_offset
;
1484 /* If we just stepped over a breakpoint insn, we don't backup
1485 the pc on purpose; this is to match behaviour without
1488 regcache_cooked_write_unsigned (regs
, AMD64_RIP_REGNUM
, rip
);
1490 if (debug_displaced
)
1491 fprintf_unfiltered (gdb_stdlog
,
1493 "relocated %%rip from %s to %s\n",
1494 paddress (gdbarch
, orig_rip
),
1495 paddress (gdbarch
, rip
));
1499 /* If the instruction was PUSHFL, then the TF bit will be set in the
1500 pushed value, and should be cleared. We'll leave this for later,
1501 since GDB already messes up the TF flag when stepping over a
1504 /* If the instruction was a call, the return address now atop the
1505 stack is the address following the copied instruction. We need
1506 to make it the address following the original instruction. */
1507 if (amd64_call_p (insn_details
))
1511 const ULONGEST retaddr_len
= 8;
1513 regcache_cooked_read_unsigned (regs
, AMD64_RSP_REGNUM
, &rsp
);
1514 retaddr
= read_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
);
1515 retaddr
= (retaddr
- insn_offset
) & 0xffffffffUL
;
1516 write_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
, retaddr
);
1518 if (debug_displaced
)
1519 fprintf_unfiltered (gdb_stdlog
,
1520 "displaced: relocated return addr at %s "
1522 paddress (gdbarch
, rsp
),
1523 paddress (gdbarch
, retaddr
));
1527 /* The maximum number of saved registers. This should include %rip. */
1528 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1530 struct amd64_frame_cache
1534 CORE_ADDR sp_offset
;
1537 /* Saved registers. */
1538 CORE_ADDR saved_regs
[AMD64_NUM_SAVED_REGS
];
1542 /* Do we have a frame? */
1546 /* Initialize a frame cache. */
1549 amd64_init_frame_cache (struct amd64_frame_cache
*cache
)
1555 cache
->sp_offset
= -8;
1558 /* Saved registers. We initialize these to -1 since zero is a valid
1559 offset (that's where %rbp is supposed to be stored).
1560 The values start out as being offsets, and are later converted to
1561 addresses (at which point -1 is interpreted as an address, still meaning
1563 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1564 cache
->saved_regs
[i
] = -1;
1565 cache
->saved_sp
= 0;
1566 cache
->saved_sp_reg
= -1;
1568 /* Frameless until proven otherwise. */
1569 cache
->frameless_p
= 1;
1572 /* Allocate and initialize a frame cache. */
1574 static struct amd64_frame_cache
*
1575 amd64_alloc_frame_cache (void)
1577 struct amd64_frame_cache
*cache
;
1579 cache
= FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache
);
1580 amd64_init_frame_cache (cache
);
1584 /* GCC 4.4 and later, can put code in the prologue to realign the
1585 stack pointer. Check whether PC points to such code, and update
1586 CACHE accordingly. Return the first instruction after the code
1587 sequence or CURRENT_PC, whichever is smaller. If we don't
1588 recognize the code, return PC. */
1591 amd64_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
1592 struct amd64_frame_cache
*cache
)
1594 /* There are 2 code sequences to re-align stack before the frame
1597 1. Use a caller-saved saved register:
1603 2. Use a callee-saved saved register:
1610 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1612 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1613 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1618 int offset
, offset_and
;
1620 if (target_read_memory (pc
, buf
, sizeof buf
))
1623 /* Check caller-saved saved register. The first instruction has
1624 to be "leaq 8(%rsp), %reg". */
1625 if ((buf
[0] & 0xfb) == 0x48
1630 /* MOD must be binary 10 and R/M must be binary 100. */
1631 if ((buf
[2] & 0xc7) != 0x44)
1634 /* REG has register number. */
1635 reg
= (buf
[2] >> 3) & 7;
1637 /* Check the REX.R bit. */
1645 /* Check callee-saved saved register. The first instruction
1646 has to be "pushq %reg". */
1648 if ((buf
[0] & 0xf8) == 0x50)
1650 else if ((buf
[0] & 0xf6) == 0x40
1651 && (buf
[1] & 0xf8) == 0x50)
1653 /* Check the REX.B bit. */
1654 if ((buf
[0] & 1) != 0)
1663 reg
+= buf
[offset
] & 0x7;
1667 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1668 if ((buf
[offset
] & 0xfb) != 0x48
1669 || buf
[offset
+ 1] != 0x8d
1670 || buf
[offset
+ 3] != 0x24
1671 || buf
[offset
+ 4] != 0x10)
1674 /* MOD must be binary 10 and R/M must be binary 100. */
1675 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
1678 /* REG has register number. */
1679 r
= (buf
[offset
+ 2] >> 3) & 7;
1681 /* Check the REX.R bit. */
1682 if (buf
[offset
] == 0x4c)
1685 /* Registers in pushq and leaq have to be the same. */
1692 /* Rigister can't be %rsp nor %rbp. */
1693 if (reg
== 4 || reg
== 5)
1696 /* The next instruction has to be "andq $-XXX, %rsp". */
1697 if (buf
[offset
] != 0x48
1698 || buf
[offset
+ 2] != 0xe4
1699 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
1702 offset_and
= offset
;
1703 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
1705 /* The next instruction has to be "pushq -8(%reg)". */
1707 if (buf
[offset
] == 0xff)
1709 else if ((buf
[offset
] & 0xf6) == 0x40
1710 && buf
[offset
+ 1] == 0xff)
1712 /* Check the REX.B bit. */
1713 if ((buf
[offset
] & 0x1) != 0)
1720 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1722 if (buf
[offset
+ 1] != 0xf8
1723 || (buf
[offset
] & 0xf8) != 0x70)
1726 /* R/M has register. */
1727 r
+= buf
[offset
] & 7;
1729 /* Registers in leaq and pushq have to be the same. */
1733 if (current_pc
> pc
+ offset_and
)
1734 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
1736 return min (pc
+ offset
+ 2, current_pc
);
1739 /* Do a limited analysis of the prologue at PC and update CACHE
1740 accordingly. Bail out early if CURRENT_PC is reached. Return the
1741 address where the analysis stopped.
1743 We will handle only functions beginning with:
1746 movq %rsp, %rbp 0x48 0x89 0xe5
1748 Any function that doesn't start with this sequence will be assumed
1749 to have no prologue and thus no valid frame pointer in %rbp. */
1752 amd64_analyze_prologue (struct gdbarch
*gdbarch
,
1753 CORE_ADDR pc
, CORE_ADDR current_pc
,
1754 struct amd64_frame_cache
*cache
)
1756 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1757 static gdb_byte proto
[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1761 if (current_pc
<= pc
)
1764 pc
= amd64_analyze_stack_align (pc
, current_pc
, cache
);
1766 op
= read_memory_unsigned_integer (pc
, 1, byte_order
);
1768 if (op
== 0x55) /* pushq %rbp */
1770 /* Take into account that we've executed the `pushq %rbp' that
1771 starts this instruction sequence. */
1772 cache
->saved_regs
[AMD64_RBP_REGNUM
] = 0;
1773 cache
->sp_offset
+= 8;
1775 /* If that's all, return now. */
1776 if (current_pc
<= pc
+ 1)
1779 /* Check for `movq %rsp, %rbp'. */
1780 read_memory (pc
+ 1, buf
, 3);
1781 if (memcmp (buf
, proto
, 3) != 0)
1784 /* OK, we actually have a frame. */
1785 cache
->frameless_p
= 0;
1792 /* Return PC of first real instruction. */
1795 amd64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR start_pc
)
1797 struct amd64_frame_cache cache
;
1800 amd64_init_frame_cache (&cache
);
1801 pc
= amd64_analyze_prologue (gdbarch
, start_pc
, 0xffffffffffffffffLL
,
1803 if (cache
.frameless_p
)
1810 /* Normal frames. */
1812 static struct amd64_frame_cache
*
1813 amd64_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
1815 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1816 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1817 struct amd64_frame_cache
*cache
;
1824 cache
= amd64_alloc_frame_cache ();
1825 *this_cache
= cache
;
1827 cache
->pc
= get_frame_func (this_frame
);
1829 amd64_analyze_prologue (gdbarch
, cache
->pc
, get_frame_pc (this_frame
),
1832 if (cache
->saved_sp_reg
!= -1)
1834 /* Stack pointer has been saved. */
1835 get_frame_register (this_frame
, cache
->saved_sp_reg
, buf
);
1836 cache
->saved_sp
= extract_unsigned_integer(buf
, 8, byte_order
);
1839 if (cache
->frameless_p
)
1841 /* We didn't find a valid frame. If we're at the start of a
1842 function, or somewhere half-way its prologue, the function's
1843 frame probably hasn't been fully setup yet. Try to
1844 reconstruct the base address for the stack frame by looking
1845 at the stack pointer. For truly "frameless" functions this
1848 if (cache
->saved_sp_reg
!= -1)
1850 /* We're halfway aligning the stack. */
1851 cache
->base
= ((cache
->saved_sp
- 8) & 0xfffffffffffffff0LL
) - 8;
1852 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->saved_sp
- 8;
1854 /* This will be added back below. */
1855 cache
->saved_regs
[AMD64_RIP_REGNUM
] -= cache
->base
;
1859 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
1860 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
)
1866 get_frame_register (this_frame
, AMD64_RBP_REGNUM
, buf
);
1867 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
);
1870 /* Now that we have the base address for the stack frame we can
1871 calculate the value of %rsp in the calling frame. */
1872 cache
->saved_sp
= cache
->base
+ 16;
1874 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1875 frame we find it at the same offset from the reconstructed base
1876 address. If we're halfway aligning the stack, %rip is handled
1877 differently (see above). */
1878 if (!cache
->frameless_p
|| cache
->saved_sp_reg
== -1)
1879 cache
->saved_regs
[AMD64_RIP_REGNUM
] = 8;
1881 /* Adjust all the saved registers such that they contain addresses
1882 instead of offsets. */
1883 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1884 if (cache
->saved_regs
[i
] != -1)
1885 cache
->saved_regs
[i
] += cache
->base
;
1891 amd64_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1892 struct frame_id
*this_id
)
1894 struct amd64_frame_cache
*cache
=
1895 amd64_frame_cache (this_frame
, this_cache
);
1897 /* This marks the outermost frame. */
1898 if (cache
->base
== 0)
1901 (*this_id
) = frame_id_build (cache
->base
+ 16, cache
->pc
);
1904 static struct value
*
1905 amd64_frame_prev_register (struct frame_info
*this_frame
, void **this_cache
,
1908 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1909 struct amd64_frame_cache
*cache
=
1910 amd64_frame_cache (this_frame
, this_cache
);
1912 gdb_assert (regnum
>= 0);
1914 if (regnum
== gdbarch_sp_regnum (gdbarch
) && cache
->saved_sp
)
1915 return frame_unwind_got_constant (this_frame
, regnum
, cache
->saved_sp
);
1917 if (regnum
< AMD64_NUM_SAVED_REGS
&& cache
->saved_regs
[regnum
] != -1)
1918 return frame_unwind_got_memory (this_frame
, regnum
,
1919 cache
->saved_regs
[regnum
]);
1921 return frame_unwind_got_register (this_frame
, regnum
, regnum
);
1924 static const struct frame_unwind amd64_frame_unwind
=
1927 amd64_frame_this_id
,
1928 amd64_frame_prev_register
,
1930 default_frame_sniffer
1934 /* Signal trampolines. */
1936 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1937 64-bit variants. This would require using identical frame caches
1938 on both platforms. */
1940 static struct amd64_frame_cache
*
1941 amd64_sigtramp_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
1943 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1944 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1945 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1946 struct amd64_frame_cache
*cache
;
1954 cache
= amd64_alloc_frame_cache ();
1956 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
1957 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
) - 8;
1959 addr
= tdep
->sigcontext_addr (this_frame
);
1960 gdb_assert (tdep
->sc_reg_offset
);
1961 gdb_assert (tdep
->sc_num_regs
<= AMD64_NUM_SAVED_REGS
);
1962 for (i
= 0; i
< tdep
->sc_num_regs
; i
++)
1963 if (tdep
->sc_reg_offset
[i
] != -1)
1964 cache
->saved_regs
[i
] = addr
+ tdep
->sc_reg_offset
[i
];
1966 *this_cache
= cache
;
1971 amd64_sigtramp_frame_this_id (struct frame_info
*this_frame
,
1972 void **this_cache
, struct frame_id
*this_id
)
1974 struct amd64_frame_cache
*cache
=
1975 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
1977 (*this_id
) = frame_id_build (cache
->base
+ 16, get_frame_pc (this_frame
));
/* Return the caller's REGNUM for a sigtramp frame; delegates to the
   normal-frame unwinder once the sigtramp cache is populated.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1991 amd64_sigtramp_frame_sniffer (const struct frame_unwind
*self
,
1992 struct frame_info
*this_frame
,
1995 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
1997 /* We shouldn't even bother if we don't have a sigcontext_addr
1999 if (tdep
->sigcontext_addr
== NULL
)
2002 if (tdep
->sigtramp_p
!= NULL
)
2004 if (tdep
->sigtramp_p (this_frame
))
2008 if (tdep
->sigtramp_start
!= 0)
2010 CORE_ADDR pc
= get_frame_pc (this_frame
);
2012 gdb_assert (tdep
->sigtramp_end
!= 0);
2013 if (pc
>= tdep
->sigtramp_start
&& pc
< tdep
->sigtramp_end
)
2020 static const struct frame_unwind amd64_sigtramp_frame_unwind
=
2023 amd64_sigtramp_frame_this_id
,
2024 amd64_sigtramp_frame_prev_register
,
2026 amd64_sigtramp_frame_sniffer
2031 amd64_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
2033 struct amd64_frame_cache
*cache
=
2034 amd64_frame_cache (this_frame
, this_cache
);
2039 static const struct frame_base amd64_frame_base
=
2041 &amd64_frame_unwind
,
2042 amd64_frame_base_address
,
2043 amd64_frame_base_address
,
2044 amd64_frame_base_address
2047 /* Normal frames, but in a function epilogue. */
2049 /* The epilogue is defined here as the 'ret' instruction, which will
2050 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2051 the function's stack frame. */
2054 amd64_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2058 if (target_read_memory (pc
, &insn
, 1))
2059 return 0; /* Can't read memory at pc. */
2061 if (insn
!= 0xc3) /* 'ret' instruction. */
/* Claim the innermost frame when the PC sits on a 'ret' instruction.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) == 0)
    return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
					 get_frame_pc (this_frame));
  else
    return 0;
}
2079 static struct amd64_frame_cache
*
2080 amd64_epilogue_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
2082 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
2083 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2084 struct amd64_frame_cache
*cache
;
2090 cache
= amd64_alloc_frame_cache ();
2091 *this_cache
= cache
;
2093 /* Cache base will be %esp plus cache->sp_offset (-8). */
2094 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
2095 cache
->base
= extract_unsigned_integer (buf
, 8,
2096 byte_order
) + cache
->sp_offset
;
2098 /* Cache pc will be the frame func. */
2099 cache
->pc
= get_frame_pc (this_frame
);
2101 /* The saved %esp will be at cache->base plus 16. */
2102 cache
->saved_sp
= cache
->base
+ 16;
2104 /* The saved %eip will be at cache->base plus 8. */
2105 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->base
+ 8;
2111 amd64_epilogue_frame_this_id (struct frame_info
*this_frame
,
2113 struct frame_id
*this_id
)
2115 struct amd64_frame_cache
*cache
= amd64_epilogue_frame_cache (this_frame
,
2118 (*this_id
) = frame_id_build (cache
->base
+ 8, cache
->pc
);
2121 static const struct frame_unwind amd64_epilogue_frame_unwind
=
2124 amd64_epilogue_frame_this_id
,
2125 amd64_frame_prev_register
,
2127 amd64_epilogue_frame_sniffer
2130 static struct frame_id
2131 amd64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2135 fp
= get_frame_register_unsigned (this_frame
, AMD64_RBP_REGNUM
);
2137 return frame_id_build (fp
+ 16, get_frame_pc (this_frame
));
2140 /* 16 byte align the SP per frame requirements. */
2143 amd64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
2145 return sp
& -(CORE_ADDR
)16;
2149 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2150 in the floating-point register set REGSET to register cache
2151 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2154 amd64_supply_fpregset (const struct regset
*regset
, struct regcache
*regcache
,
2155 int regnum
, const void *fpregs
, size_t len
)
2157 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (regset
->arch
);
2159 gdb_assert (len
== tdep
->sizeof_fpregset
);
2160 amd64_supply_fxsave (regcache
, regnum
, fpregs
);
2163 /* Collect register REGNUM from the register cache REGCACHE and store
2164 it in the buffer specified by FPREGS and LEN as described by the
2165 floating-point register set REGSET. If REGNUM is -1, do this for
2166 all registers in REGSET. */
2169 amd64_collect_fpregset (const struct regset
*regset
,
2170 const struct regcache
*regcache
,
2171 int regnum
, void *fpregs
, size_t len
)
2173 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (regset
->arch
);
2175 gdb_assert (len
== tdep
->sizeof_fpregset
);
2176 amd64_collect_fxsave (regcache
, regnum
, fpregs
);
2179 /* Return the appropriate register set for the core section identified
2180 by SECT_NAME and SECT_SIZE. */
2182 static const struct regset
*
2183 amd64_regset_from_core_section (struct gdbarch
*gdbarch
,
2184 const char *sect_name
, size_t sect_size
)
2186 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2188 if (strcmp (sect_name
, ".reg2") == 0 && sect_size
== tdep
->sizeof_fpregset
)
2190 if (tdep
->fpregset
== NULL
)
2191 tdep
->fpregset
= regset_alloc (gdbarch
, amd64_supply_fpregset
,
2192 amd64_collect_fpregset
);
2194 return tdep
->fpregset
;
2197 return i386_regset_from_core_section (gdbarch
, sect_name
, sect_size
);
2201 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2202 %rdi. We expect its value to be a pointer to the jmp_buf structure
2203 from which we extract the address that we will land at. This
2204 address is copied into PC. This routine returns non-zero on
2208 amd64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2212 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2213 int jb_pc_offset
= gdbarch_tdep (gdbarch
)->jb_pc_offset
;
2214 int len
= TYPE_LENGTH (builtin_type (gdbarch
)->builtin_func_ptr
);
2216 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2217 longjmp will land. */
2218 if (jb_pc_offset
== -1)
2221 get_frame_register (frame
, AMD64_RDI_REGNUM
, buf
);
2222 jb_addr
= extract_typed_address
2223 (buf
, builtin_type (gdbarch
)->builtin_data_ptr
);
2224 if (target_read_memory (jb_addr
+ jb_pc_offset
, buf
, len
))
2227 *pc
= extract_typed_address (buf
, builtin_type (gdbarch
)->builtin_func_ptr
);
2232 static const int amd64_record_regmap
[] =
2234 AMD64_RAX_REGNUM
, AMD64_RCX_REGNUM
, AMD64_RDX_REGNUM
, AMD64_RBX_REGNUM
,
2235 AMD64_RSP_REGNUM
, AMD64_RBP_REGNUM
, AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
2236 AMD64_R8_REGNUM
, AMD64_R9_REGNUM
, AMD64_R10_REGNUM
, AMD64_R11_REGNUM
,
2237 AMD64_R12_REGNUM
, AMD64_R13_REGNUM
, AMD64_R14_REGNUM
, AMD64_R15_REGNUM
,
2238 AMD64_RIP_REGNUM
, AMD64_EFLAGS_REGNUM
, AMD64_CS_REGNUM
, AMD64_SS_REGNUM
,
2239 AMD64_DS_REGNUM
, AMD64_ES_REGNUM
, AMD64_FS_REGNUM
, AMD64_GS_REGNUM
2243 amd64_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
)
2245 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2246 const struct target_desc
*tdesc
= info
.target_desc
;
2248 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2249 floating-point registers. */
2250 tdep
->sizeof_fpregset
= I387_SIZEOF_FXSAVE
;
2252 if (! tdesc_has_registers (tdesc
))
2253 tdesc
= tdesc_amd64
;
2254 tdep
->tdesc
= tdesc
;
2256 tdep
->num_core_regs
= AMD64_NUM_GREGS
+ I387_NUM_REGS
;
2257 tdep
->register_names
= amd64_register_names
;
2259 tdep
->num_byte_regs
= 20;
2260 tdep
->num_word_regs
= 16;
2261 tdep
->num_dword_regs
= 16;
2262 /* Avoid wiring in the MMX registers for now. */
2263 tdep
->num_mmx_regs
= 0;
2265 set_gdbarch_pseudo_register_read (gdbarch
,
2266 amd64_pseudo_register_read
);
2267 set_gdbarch_pseudo_register_write (gdbarch
,
2268 amd64_pseudo_register_write
);
2270 set_tdesc_pseudo_register_name (gdbarch
, amd64_pseudo_register_name
);
2272 /* AMD64 has an FPU and 16 SSE registers. */
2273 tdep
->st0_regnum
= AMD64_ST0_REGNUM
;
2274 tdep
->num_xmm_regs
= 16;
2276 /* This is what all the fuss is about. */
2277 set_gdbarch_long_bit (gdbarch
, 64);
2278 set_gdbarch_long_long_bit (gdbarch
, 64);
2279 set_gdbarch_ptr_bit (gdbarch
, 64);
2281 /* In contrast to the i386, on AMD64 a `long double' actually takes
2282 up 128 bits, even though it's still based on the i387 extended
2283 floating-point format which has only 80 significant bits. */
2284 set_gdbarch_long_double_bit (gdbarch
, 128);
2286 set_gdbarch_num_regs (gdbarch
, AMD64_NUM_REGS
);
2288 /* Register numbers of various important registers. */
2289 set_gdbarch_sp_regnum (gdbarch
, AMD64_RSP_REGNUM
); /* %rsp */
2290 set_gdbarch_pc_regnum (gdbarch
, AMD64_RIP_REGNUM
); /* %rip */
2291 set_gdbarch_ps_regnum (gdbarch
, AMD64_EFLAGS_REGNUM
); /* %eflags */
2292 set_gdbarch_fp0_regnum (gdbarch
, AMD64_ST0_REGNUM
); /* %st(0) */
2294 /* The "default" register numbering scheme for AMD64 is referred to
2295 as the "DWARF Register Number Mapping" in the System V psABI.
2296 The preferred debugging format for all known AMD64 targets is
2297 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2298 DWARF-1), but we provide the same mapping just in case. This
2299 mapping is also used for stabs, which GCC does support. */
2300 set_gdbarch_stab_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
2301 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
2303 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2304 be in use on any of the supported AMD64 targets. */
2306 /* Call dummy code. */
2307 set_gdbarch_push_dummy_call (gdbarch
, amd64_push_dummy_call
);
2308 set_gdbarch_frame_align (gdbarch
, amd64_frame_align
);
2309 set_gdbarch_frame_red_zone_size (gdbarch
, 128);
2310 tdep
->call_dummy_num_integer_regs
=
2311 ARRAY_SIZE (amd64_dummy_call_integer_regs
);
2312 tdep
->call_dummy_integer_regs
= amd64_dummy_call_integer_regs
;
2313 tdep
->classify
= amd64_classify
;
2315 set_gdbarch_convert_register_p (gdbarch
, i387_convert_register_p
);
2316 set_gdbarch_register_to_value (gdbarch
, i387_register_to_value
);
2317 set_gdbarch_value_to_register (gdbarch
, i387_value_to_register
);
2319 set_gdbarch_return_value (gdbarch
, amd64_return_value
);
2321 set_gdbarch_skip_prologue (gdbarch
, amd64_skip_prologue
);
2323 tdep
->record_regmap
= amd64_record_regmap
;
2325 set_gdbarch_dummy_id (gdbarch
, amd64_dummy_id
);
2327 /* Hook the function epilogue frame unwinder. This unwinder is
2328 appended to the list first, so that it supercedes the other
2329 unwinders in function epilogues. */
2330 frame_unwind_prepend_unwinder (gdbarch
, &amd64_epilogue_frame_unwind
);
2332 /* Hook the prologue-based frame unwinders. */
2333 frame_unwind_append_unwinder (gdbarch
, &amd64_sigtramp_frame_unwind
);
2334 frame_unwind_append_unwinder (gdbarch
, &amd64_frame_unwind
);
2335 frame_base_set_default (gdbarch
, &amd64_frame_base
);
2337 /* If we have a register mapping, enable the generic core file support. */
2338 if (tdep
->gregset_reg_offset
)
2339 set_gdbarch_regset_from_core_section (gdbarch
,
2340 amd64_regset_from_core_section
);
2342 set_gdbarch_get_longjmp_target (gdbarch
, amd64_get_longjmp_target
);
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

void
_initialize_amd64_tdep (void)
{
  /* Register the default amd64 target description so that gdbarch
     initialization can fall back on it when the target provides
     none.  */
  initialize_tdesc_amd64 ();
}
2355 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2356 sense that the instruction pointer and data pointer are simply
2357 64-bit offsets into the code segment and the data segment instead
of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment
   selector).  */
2362 /* Fill register REGNUM in REGCACHE with the appropriate
2363 floating-point or SSE register value from *FXSAVE. If REGNUM is
2364 -1, do this for all registers. This function masks off any of the
2365 reserved bits in *FXSAVE. */
2368 amd64_supply_fxsave (struct regcache
*regcache
, int regnum
,
2371 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2372 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2374 i387_supply_fxsave (regcache
, regnum
, fxsave
);
2376 if (fxsave
&& gdbarch_ptr_bit (gdbarch
) == 64)
2378 const gdb_byte
*regs
= fxsave
;
2380 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
2381 regcache_raw_supply (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
2382 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
2383 regcache_raw_supply (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);
2387 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2388 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */
2393 amd64_collect_fxsave (const struct regcache
*regcache
, int regnum
,
2396 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2397 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2398 gdb_byte
*regs
= fxsave
;
2400 i387_collect_fxsave (regcache
, regnum
, fxsave
);
2402 if (gdbarch_ptr_bit (gdbarch
) == 64)
2404 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
2405 regcache_raw_collect (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
2406 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
2407 regcache_raw_collect (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);