/* Target-dependent code for AMD64.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "opcode/i386.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "gdb_assert.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"

#include "features/i386/amd64.c"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)
/* The registers used to pass integer arguments during a function call.  */
static int amd64_dummy_call_integer_regs[] =
{
  AMD64_RDI_REGNUM,		/* %rdi */
  AMD64_RSI_REGNUM,		/* %rsi */
  AMD64_RDX_REGNUM,		/* %rdx */
  AMD64_RCX_REGNUM,		/* %rcx */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM		/* %r9 */
};
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);

  return regnum;
}
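
/* For illustration (the values follow from the table above, they are not
   used by the code): the psABI DWARF numbering does not match GDB's own
   register order for the low general-purpose registers.  DWARF register 1
   is %rdx and maps to AMD64_RDX_REGNUM (3), DWARF register 3 is %rbx and
   maps to AMD64_RBX_REGNUM (1), and DWARF register 16, the return-address
   column, maps to AMD64_RIP_REGNUM.  */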
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
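
/* Worked examples of the merge rules above (illustrative only):

     amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE)   => AMD64_SSE      (b)
     amd64_merge_classes (AMD64_INTEGER,  AMD64_SSE)   => AMD64_INTEGER  (d)
     amd64_merge_classes (AMD64_SSE,      AMD64_X87)   => AMD64_MEMORY   (e)
     amd64_merge_classes (AMD64_SSE,      AMD64_SSEUP) => AMD64_SSE      (f)  */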
251 /* Return non-zero if TYPE is a non-POD structure or union type. */
254 amd64_non_pod_p (struct type
*type
)
256 /* ??? A class with a base class certainly isn't POD, but does this
257 catch all non-POD structure types? */
258 if (TYPE_CODE (type
) == TYPE_CODE_STRUCT
&& TYPE_N_BASECLASSES (type
) > 0)
264 /* Classify TYPE according to the rules for aggregate (structures and
265 arrays) and union types, and store the result in CLASS. */
268 amd64_classify_aggregate (struct type
*type
, enum amd64_reg_class
class[2])
270 int len
= TYPE_LENGTH (type
);
272 /* 1. If the size of an object is larger than two eightbytes, or in
273 C++, is a non-POD structure or union type, or contains
274 unaligned fields, it has class memory. */
275 if (len
> 16 || amd64_non_pod_p (type
))
277 class[0] = class[1] = AMD64_MEMORY
;
281 /* 2. Both eightbytes get initialized to class NO_CLASS. */
282 class[0] = class[1] = AMD64_NO_CLASS
;
  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte.  */
289 if (TYPE_CODE (type
) == TYPE_CODE_ARRAY
)
291 struct type
*subtype
= check_typedef (TYPE_TARGET_TYPE (type
));
293 /* All fields in an array have the same type. */
294 amd64_classify (subtype
, class);
295 if (len
> 8 && class[1] == AMD64_NO_CLASS
)
302 /* Structure or union. */
303 gdb_assert (TYPE_CODE (type
) == TYPE_CODE_STRUCT
304 || TYPE_CODE (type
) == TYPE_CODE_UNION
);
306 for (i
= 0; i
< TYPE_NFIELDS (type
); i
++)
308 struct type
*subtype
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
309 int pos
= TYPE_FIELD_BITPOS (type
, i
) / 64;
310 enum amd64_reg_class subclass
[2];
311 int bitsize
= TYPE_FIELD_BITSIZE (type
, i
);
315 bitsize
= TYPE_LENGTH (subtype
) * 8;
316 endpos
= (TYPE_FIELD_BITPOS (type
, i
) + bitsize
- 1) / 64;
318 /* Ignore static fields. */
319 if (field_is_static (&TYPE_FIELD (type
, i
)))
322 gdb_assert (pos
== 0 || pos
== 1);
324 amd64_classify (subtype
, subclass
);
325 class[pos
] = amd64_merge_classes (class[pos
], subclass
[0]);
326 if (bitsize
<= 64 && pos
== 0 && endpos
== 1)
327 /* This is a bit of an odd case: We have a field that would
328 normally fit in one of the two eightbytes, except that
329 it is placed in a way that this field straddles them.
330 This has been seen with a structure containing an array.
332 The ABI is a bit unclear in this case, but we assume that
333 this field's class (stored in subclass[0]) must also be merged
334 into class[1]. In other words, our field has a piece stored
335 in the second eight-byte, and thus its class applies to
336 the second eight-byte as well.
338 In the case where the field length exceeds 8 bytes,
339 it should not be necessary to merge the field class
340 into class[1]. As LEN > 8, subclass[1] is necessarily
341 different from AMD64_NO_CLASS. If subclass[1] is equal
342 to subclass[0], then the normal class[1]/subclass[1]
343 merging will take care of everything. For subclass[1]
344 to be different from subclass[0], I can only see the case
345 where we have a SSE/SSEUP or X87/X87UP pair, which both
346 use up all 16 bytes of the aggregate, and are already
	 handled just fine (because each portion sits on its own 8-byte).  */
349 class[1] = amd64_merge_classes (class[1], subclass
[0]);
351 class[1] = amd64_merge_classes (class[1], subclass
[1]);
355 /* 4. Then a post merger cleanup is done: */
  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
359 if (class[0] == AMD64_MEMORY
|| class[1] == AMD64_MEMORY
)
360 class[0] = class[1] = AMD64_MEMORY
;
  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to SSE.  */
364 if (class[0] == AMD64_SSEUP
)
365 class[0] = AMD64_SSE
;
366 if (class[1] == AMD64_SSEUP
&& class[0] != AMD64_SSE
)
367 class[1] = AMD64_SSE
;
370 /* Classify TYPE, and store the result in CLASS. */
373 amd64_classify (struct type
*type
, enum amd64_reg_class
class[2])
375 enum type_code code
= TYPE_CODE (type
);
376 int len
= TYPE_LENGTH (type
);
378 class[0] = class[1] = AMD64_NO_CLASS
;
  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
384 if ((code
== TYPE_CODE_INT
|| code
== TYPE_CODE_ENUM
385 || code
== TYPE_CODE_BOOL
|| code
== TYPE_CODE_RANGE
386 || code
== TYPE_CODE_CHAR
387 || code
== TYPE_CODE_PTR
|| code
== TYPE_CODE_REF
)
388 && (len
== 1 || len
== 2 || len
== 4 || len
== 8))
389 class[0] = AMD64_INTEGER
;
  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
393 else if ((code
== TYPE_CODE_FLT
|| code
== TYPE_CODE_DECFLOAT
)
394 && (len
== 4 || len
== 8))
396 class[0] = AMD64_SSE
;
398 /* Arguments of types __float128, _Decimal128 and __m128 are split into
399 two halves. The least significant ones belong to class SSE, the most
400 significant one to class SSEUP. */
401 else if (code
== TYPE_CODE_DECFLOAT
&& len
== 16)
402 /* FIXME: __float128, __m128. */
403 class[0] = AMD64_SSE
, class[1] = AMD64_SSEUP
;
  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
408 else if (code
== TYPE_CODE_FLT
&& len
== 16)
409 /* Class X87 and X87UP. */
410 class[0] = AMD64_X87
, class[1] = AMD64_X87UP
;
413 else if (code
== TYPE_CODE_ARRAY
|| code
== TYPE_CODE_STRUCT
414 || code
== TYPE_CODE_UNION
)
415 amd64_classify_aggregate (type
, class);
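
/* Illustrative example of the classification above (not used by the
   code): a 16-byte aggregate such as

       struct s { double d; long l; };

   classifies as { AMD64_SSE, AMD64_INTEGER }; the first eightbyte (the
   double) travels in an SSE register and the second (the long) in an
   integer register.  Anything larger than 16 bytes, or a non-POD C++
   type, classifies as MEMORY and is passed and returned via the stack
   instead.  */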
418 static enum return_value_convention
419 amd64_return_value (struct gdbarch
*gdbarch
, struct type
*func_type
,
420 struct type
*type
, struct regcache
*regcache
,
421 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
423 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
424 enum amd64_reg_class
class[2];
425 int len
= TYPE_LENGTH (type
);
426 static int integer_regnum
[] = { AMD64_RAX_REGNUM
, AMD64_RDX_REGNUM
};
427 static int sse_regnum
[] = { AMD64_XMM0_REGNUM
, AMD64_XMM1_REGNUM
};
432 gdb_assert (!(readbuf
&& writebuf
));
433 gdb_assert (tdep
->classify
);
435 /* 1. Classify the return type with the classification algorithm. */
436 tdep
->classify (type
, class);
438 /* 2. If the type has class MEMORY, then the caller provides space
439 for the return value and passes the address of this storage in
440 %rdi as if it were the first argument to the function. In effect,
441 this address becomes a hidden first argument.
443 On return %rax will contain the address that has been passed in
444 by the caller in %rdi. */
445 if (class[0] == AMD64_MEMORY
)
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */
455 regcache_raw_read_unsigned (regcache
, AMD64_RAX_REGNUM
, &addr
);
456 read_memory (addr
, readbuf
, TYPE_LENGTH (type
));
459 return RETURN_VALUE_ABI_RETURNS_ADDRESS
;
462 gdb_assert (class[1] != AMD64_MEMORY
);
463 gdb_assert (len
<= 16);
465 for (i
= 0; len
> 0; i
++, len
-= 8)
473 /* 3. If the class is INTEGER, the next available register
474 of the sequence %rax, %rdx is used. */
475 regnum
= integer_regnum
[integer_reg
++];
479 /* 4. If the class is SSE, the next available SSE register
480 of the sequence %xmm0, %xmm1 is used. */
481 regnum
= sse_regnum
[sse_reg
++];
485 /* 5. If the class is SSEUP, the eightbyte is passed in the
486 upper half of the last used SSE register. */
487 gdb_assert (sse_reg
> 0);
488 regnum
= sse_regnum
[sse_reg
- 1];
493 /* 6. If the class is X87, the value is returned on the X87
494 stack in %st0 as 80-bit x87 number. */
495 regnum
= AMD64_ST0_REGNUM
;
497 i387_return_value (gdbarch
, regcache
);
501 /* 7. If the class is X87UP, the value is returned together
502 with the previous X87 value in %st0. */
503 gdb_assert (i
> 0 && class[0] == AMD64_X87
);
504 regnum
= AMD64_ST0_REGNUM
;
513 gdb_assert (!"Unexpected register class.");
516 gdb_assert (regnum
!= -1);
519 regcache_raw_read_part (regcache
, regnum
, offset
, min (len
, 8),
522 regcache_raw_write_part (regcache
, regnum
, offset
, min (len
, 8),
526 return RETURN_VALUE_REGISTER_CONVENTION
;
531 amd64_push_arguments (struct regcache
*regcache
, int nargs
,
532 struct value
**args
, CORE_ADDR sp
, int struct_return
)
534 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
535 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
536 int *integer_regs
= tdep
->call_dummy_integer_regs
;
537 int num_integer_regs
= tdep
->call_dummy_num_integer_regs
;
539 static int sse_regnum
[] =
541 /* %xmm0 ... %xmm7 */
542 AMD64_XMM0_REGNUM
+ 0, AMD64_XMM1_REGNUM
,
543 AMD64_XMM0_REGNUM
+ 2, AMD64_XMM0_REGNUM
+ 3,
544 AMD64_XMM0_REGNUM
+ 4, AMD64_XMM0_REGNUM
+ 5,
545 AMD64_XMM0_REGNUM
+ 6, AMD64_XMM0_REGNUM
+ 7,
547 struct value
**stack_args
= alloca (nargs
* sizeof (struct value
*));
548 /* An array that mirrors the stack_args array. For all arguments
549 that are passed by MEMORY, if that argument's address also needs
550 to be stored in a register, the ARG_ADDR_REGNO array will contain
551 that register number (or a negative value otherwise). */
552 int *arg_addr_regno
= alloca (nargs
* sizeof (int));
553 int num_stack_args
= 0;
554 int num_elements
= 0;
560 gdb_assert (tdep
->classify
);
562 /* Reserve a register for the "hidden" argument. */
566 for (i
= 0; i
< nargs
; i
++)
568 struct type
*type
= value_type (args
[i
]);
569 int len
= TYPE_LENGTH (type
);
570 enum amd64_reg_class
class[2];
571 int needed_integer_regs
= 0;
572 int needed_sse_regs
= 0;
575 /* Classify argument. */
576 tdep
->classify (type
, class);
      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
580 for (j
= 0; j
< 2; j
++)
582 if (class[j
] == AMD64_INTEGER
)
583 needed_integer_regs
++;
584 else if (class[j
] == AMD64_SSE
)
588 /* Check whether enough registers are available, and if the
589 argument should be passed in registers at all. */
590 if (integer_reg
+ needed_integer_regs
> num_integer_regs
591 || sse_reg
+ needed_sse_regs
> ARRAY_SIZE (sse_regnum
)
592 || (needed_integer_regs
== 0 && needed_sse_regs
== 0))
594 /* The argument will be passed on the stack. */
595 num_elements
+= ((len
+ 7) / 8);
596 stack_args
[num_stack_args
] = args
[i
];
597 /* If this is an AMD64_MEMORY argument whose address must also
598 be passed in one of the integer registers, reserve that
599 register and associate this value to that register so that
600 we can store the argument address as soon as we know it. */
601 if (class[0] == AMD64_MEMORY
602 && tdep
->memory_args_by_pointer
603 && integer_reg
< tdep
->call_dummy_num_integer_regs
)
604 arg_addr_regno
[num_stack_args
] =
605 tdep
->call_dummy_integer_regs
[integer_reg
++];
607 arg_addr_regno
[num_stack_args
] = -1;
612 /* The argument will be passed in registers. */
613 const gdb_byte
*valbuf
= value_contents (args
[i
]);
616 gdb_assert (len
<= 16);
618 for (j
= 0; len
> 0; j
++, len
-= 8)
626 regnum
= integer_regs
[integer_reg
++];
630 regnum
= sse_regnum
[sse_reg
++];
634 gdb_assert (sse_reg
> 0);
635 regnum
= sse_regnum
[sse_reg
- 1];
640 gdb_assert (!"Unexpected register class.");
643 gdb_assert (regnum
!= -1);
644 memset (buf
, 0, sizeof buf
);
645 memcpy (buf
, valbuf
+ j
* 8, min (len
, 8));
646 regcache_raw_write_part (regcache
, regnum
, offset
, 8, buf
);
651 /* Allocate space for the arguments on the stack. */
652 sp
-= num_elements
* 8;
654 /* The psABI says that "The end of the input argument area shall be
655 aligned on a 16 byte boundary." */
658 /* Write out the arguments to the stack. */
659 for (i
= 0; i
< num_stack_args
; i
++)
661 struct type
*type
= value_type (stack_args
[i
]);
662 const gdb_byte
*valbuf
= value_contents (stack_args
[i
]);
663 int len
= TYPE_LENGTH (type
);
664 CORE_ADDR arg_addr
= sp
+ element
* 8;
666 write_memory (arg_addr
, valbuf
, len
);
667 if (arg_addr_regno
[i
] >= 0)
669 /* We also need to store the address of that argument in
670 the given register. */
672 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
674 store_unsigned_integer (buf
, 8, byte_order
, arg_addr
);
675 regcache_cooked_write (regcache
, arg_addr_regno
[i
], buf
);
677 element
+= ((len
+ 7) / 8);
680 /* The psABI says that "For calls that may call functions that use
681 varargs or stdargs (prototype-less calls or calls to functions
682 containing ellipsis (...) in the declaration) %al is used as
683 hidden argument to specify the number of SSE registers used. */
684 regcache_raw_write_unsigned (regcache
, AMD64_RAX_REGNUM
, sse_reg
);
689 amd64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
690 struct regcache
*regcache
, CORE_ADDR bp_addr
,
691 int nargs
, struct value
**args
, CORE_ADDR sp
,
692 int struct_return
, CORE_ADDR struct_addr
)
694 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
695 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
698 /* Pass arguments. */
699 sp
= amd64_push_arguments (regcache
, nargs
, args
, sp
, struct_return
);
  /* Pass "hidden" argument.  */
704 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
      /* The "hidden" argument is passed through the first argument
	 register.  */
707 const int arg_regnum
= tdep
->call_dummy_integer_regs
[0];
709 store_unsigned_integer (buf
, 8, byte_order
, struct_addr
);
710 regcache_cooked_write (regcache
, arg_regnum
, buf
);
713 /* Reserve some memory on the stack for the integer-parameter registers,
714 if required by the ABI. */
715 if (tdep
->integer_param_regs_saved_in_caller_frame
)
716 sp
-= tdep
->call_dummy_num_integer_regs
* 8;
718 /* Store return address. */
720 store_unsigned_integer (buf
, 8, byte_order
, bp_addr
);
721 write_memory (sp
, buf
, 8);
723 /* Finally, update the stack pointer... */
724 store_unsigned_integer (buf
, 8, byte_order
, sp
);
725 regcache_cooked_write (regcache
, AMD64_RSP_REGNUM
, buf
);
727 /* ...and fake a frame pointer. */
728 regcache_cooked_write (regcache
, AMD64_RBP_REGNUM
, buf
);
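
/* Illustrative sketch of the dummy-call frame set up above, assuming all
   arguments fit in registers (the addresses are hypothetical):

        sp + 8:  end of the 16-byte aligned argument area
        sp:      return address = BP_ADDR, where the call dummy stops
        %rsp  =  %rbp  =  sp

   The callee pushes its own %rbp on top of this, so the dummy frame
   unwinds like an ordinary frame.  */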
733 /* Displaced instruction handling. */
735 /* A partially decoded instruction.
736 This contains enough details for displaced stepping purposes. */
740 /* The number of opcode bytes. */
742 /* The offset of the rex prefix or -1 if not present. */
744 /* The offset to the first opcode byte. */
746 /* The offset to the modrm byte or -1 if not present. */
749 /* The raw instruction. */
753 struct displaced_step_closure
755 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
760 /* Details of the instruction. */
761 struct amd64_insn insn_details
;
763 /* Amount of space allocated to insn_buf. */
766 /* The possibly modified insn.
767 This is a variable-length field. */
768 gdb_byte insn_buf
[1];
771 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
772 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
773 at which point delete these in favor of libopcodes' versions). */
775 static const unsigned char onebyte_has_modrm
[256] = {
776 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
777 /* ------------------------------- */
778 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
779 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
780 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
781 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
782 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
783 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
784 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
785 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
786 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
787 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
788 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
789 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
790 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
791 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
792 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
793 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
794 /* ------------------------------- */
795 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
798 static const unsigned char twobyte_has_modrm
[256] = {
799 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
800 /* ------------------------------- */
801 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
802 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
803 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
804 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
805 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
806 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
807 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
808 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
809 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
810 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
811 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
812 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
813 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
814 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
815 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
816 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
817 /* ------------------------------- */
818 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
821 static int amd64_syscall_p (const struct amd64_insn
*insn
, int *lengthp
);
824 rex_prefix_p (gdb_byte pfx
)
826 return REX_PREFIX_P (pfx
);
829 /* Skip the legacy instruction prefixes in INSN.
830 We assume INSN is properly sentineled so we don't have to worry
831 about falling off the end of the buffer. */
834 amd64_skip_prefixes (gdb_byte
*insn
)
840 case DATA_PREFIX_OPCODE
:
841 case ADDR_PREFIX_OPCODE
:
842 case CS_PREFIX_OPCODE
:
843 case DS_PREFIX_OPCODE
:
844 case ES_PREFIX_OPCODE
:
845 case FS_PREFIX_OPCODE
:
846 case GS_PREFIX_OPCODE
:
847 case SS_PREFIX_OPCODE
:
848 case LOCK_PREFIX_OPCODE
:
849 case REPE_PREFIX_OPCODE
:
850 case REPNE_PREFIX_OPCODE
:
862 /* fprintf-function for amd64_insn_length.
863 This function is a nop, we don't want to print anything, we just want to
864 compute the length of the insn. */
866 static int ATTR_FORMAT (printf
, 2, 3)
867 amd64_insn_length_fprintf (void *stream
, const char *format
, ...)
872 /* Initialize a struct disassemble_info for amd64_insn_length. */
875 amd64_insn_length_init_dis (struct gdbarch
*gdbarch
,
876 struct disassemble_info
*di
,
877 const gdb_byte
*insn
, int max_len
,
880 init_disassemble_info (di
, NULL
, amd64_insn_length_fprintf
);
882 /* init_disassemble_info installs buffer_read_memory, etc.
883 so we don't need to do that here.
884 The cast is necessary until disassemble_info is const-ified. */
885 di
->buffer
= (gdb_byte
*) insn
;
886 di
->buffer_length
= max_len
;
887 di
->buffer_vma
= addr
;
889 di
->arch
= gdbarch_bfd_arch_info (gdbarch
)->arch
;
890 di
->mach
= gdbarch_bfd_arch_info (gdbarch
)->mach
;
891 di
->endian
= gdbarch_byte_order (gdbarch
);
892 di
->endian_code
= gdbarch_byte_order_for_code (gdbarch
);
894 disassemble_init_for_target (di
);
897 /* Return the length in bytes of INSN.
898 MAX_LEN is the size of the buffer containing INSN.
899 libopcodes currently doesn't export a utility to compute the
900 instruction length, so use the disassembler until then. */
903 amd64_insn_length (struct gdbarch
*gdbarch
,
904 const gdb_byte
*insn
, int max_len
, CORE_ADDR addr
)
906 struct disassemble_info di
;
908 amd64_insn_length_init_dis (gdbarch
, &di
, insn
, max_len
, addr
);
910 return gdbarch_print_insn (gdbarch
, addr
, &di
);
/* Return an integer register (other than RSP) that is unused as an input
   operand in an insn.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */
921 amd64_get_unused_input_int_reg (const struct amd64_insn
*details
)
923 /* 1 bit for each reg */
924 int used_regs_mask
= 0;
926 /* There can be at most 3 int regs used as inputs in an insn, and we have
927 7 to choose from (RAX ... RDI, sans RSP).
928 This allows us to take a conservative approach and keep things simple.
929 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
930 that implicitly specify RAX. */
933 used_regs_mask
|= 1 << EAX_REG_NUM
;
  /* Similarly avoid RDX, implicit operand in divides.  */
935 used_regs_mask
|= 1 << EDX_REG_NUM
;
937 used_regs_mask
|= 1 << ESP_REG_NUM
;
939 /* If the opcode is one byte long and there's no ModRM byte,
940 assume the opcode specifies a register. */
941 if (details
->opcode_len
== 1 && details
->modrm_offset
== -1)
942 used_regs_mask
|= 1 << (details
->raw_insn
[details
->opcode_offset
] & 7);
944 /* Mark used regs in the modrm/sib bytes. */
945 if (details
->modrm_offset
!= -1)
947 int modrm
= details
->raw_insn
[details
->modrm_offset
];
948 int mod
= MODRM_MOD_FIELD (modrm
);
949 int reg
= MODRM_REG_FIELD (modrm
);
950 int rm
= MODRM_RM_FIELD (modrm
);
951 int have_sib
= mod
!= 3 && rm
== 4;
953 /* Assume the reg field of the modrm byte specifies a register. */
954 used_regs_mask
|= 1 << reg
;
958 int base
= SIB_BASE_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
959 int index
= SIB_INDEX_FIELD (details
->raw_insn
[details
->modrm_offset
+ 1]);
960 used_regs_mask
|= 1 << base
;
961 used_regs_mask
|= 1 << index
;
965 used_regs_mask
|= 1 << rm
;
969 gdb_assert (used_regs_mask
< 256);
970 gdb_assert (used_regs_mask
!= 255);
972 /* Finally, find a free reg. */
976 for (i
= 0; i
< 8; ++i
)
978 if (! (used_regs_mask
& (1 << i
)))
982 /* We shouldn't get here. */
983 internal_error (__FILE__
, __LINE__
, _("unable to find free reg"));
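
/* Illustrative example of the selection above: for "add %rbx,(%rcx,%rdx,4)"
   the mask covers %rax, %rdx and %rsp (always excluded), plus %rbx (the
   ModRM reg field) and %rcx/%rdx (the SIB base/index), so the lowest free
   register is %rbp, architecture number 5.  */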
987 /* Extract the details of INSN that we need. */
990 amd64_get_insn_details (gdb_byte
*insn
, struct amd64_insn
*details
)
992 gdb_byte
*start
= insn
;
995 details
->raw_insn
= insn
;
997 details
->opcode_len
= -1;
998 details
->rex_offset
= -1;
999 details
->opcode_offset
= -1;
1000 details
->modrm_offset
= -1;
1002 /* Skip legacy instruction prefixes. */
1003 insn
= amd64_skip_prefixes (insn
);
1005 /* Skip REX instruction prefix. */
1006 if (rex_prefix_p (*insn
))
1008 details
->rex_offset
= insn
- start
;
1012 details
->opcode_offset
= insn
- start
;
1014 if (*insn
== TWO_BYTE_OPCODE_ESCAPE
)
1016 /* Two or three-byte opcode. */
1018 need_modrm
= twobyte_has_modrm
[*insn
];
1020 /* Check for three-byte opcode. */
1030 details
->opcode_len
= 3;
1033 details
->opcode_len
= 2;
1039 /* One-byte opcode. */
1040 need_modrm
= onebyte_has_modrm
[*insn
];
1041 details
->opcode_len
= 1;
1047 details
->modrm_offset
= insn
- start
;
1051 /* Update %rip-relative addressing in INSN.
1053 %rip-relative addressing only uses a 32-bit displacement.
1054 32 bits is not enough to be guaranteed to cover the distance between where
1055 the real instruction is and where its copy is.
1056 Convert the insn to use base+disp addressing.
1057 We set base = pc + insn_length so we can leave disp unchanged. */
1060 fixup_riprel (struct gdbarch
*gdbarch
, struct displaced_step_closure
*dsc
,
1061 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1063 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1064 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1065 int modrm_offset
= insn_details
->modrm_offset
;
1066 gdb_byte
*insn
= insn_details
->raw_insn
+ modrm_offset
;
1070 int arch_tmp_regno
, tmp_regno
;
1071 ULONGEST orig_value
;
1073 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1076 /* Compute the rip-relative address. */
1077 disp
= extract_signed_integer (insn
, sizeof (int32_t), byte_order
);
1078 insn_length
= amd64_insn_length (gdbarch
, dsc
->insn_buf
, dsc
->max_len
, from
);
1079 rip_base
= from
+ insn_length
;
1081 /* We need a register to hold the address.
1082 Pick one not used in the insn.
1083 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1084 arch_tmp_regno
= amd64_get_unused_input_int_reg (insn_details
);
1085 tmp_regno
= amd64_arch_reg_to_regnum (arch_tmp_regno
);
1087 /* REX.B should be unset as we were using rip-relative addressing,
1088 but ensure it's unset anyway, tmp_regno is not r8-r15. */
1089 if (insn_details
->rex_offset
!= -1)
1090 dsc
->insn_buf
[insn_details
->rex_offset
] &= ~REX_B
;
1092 regcache_cooked_read_unsigned (regs
, tmp_regno
, &orig_value
);
1093 dsc
->tmp_regno
= tmp_regno
;
1094 dsc
->tmp_save
= orig_value
;
1097 /* Convert the ModRM field to be base+disp. */
1098 dsc
->insn_buf
[modrm_offset
] &= ~0xc7;
1099 dsc
->insn_buf
[modrm_offset
] |= 0x80 + arch_tmp_regno
;
1101 regcache_cooked_write_unsigned (regs
, tmp_regno
, rip_base
);
1103 if (debug_displaced
)
1104 fprintf_unfiltered (gdb_stdlog
, "displaced: %%rip-relative addressing used.\n"
1105 "displaced: using temp reg %d, old value %s, new value %s\n",
1106 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
),
1107 paddress (gdbarch
, rip_base
));
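
/* Illustrative example of the rewrite performed above, assuming the free
   scratch register chosen is %rcx (architecture number 1):

       original copy:   48 8b 05 d2 04 20 00    mov 0x2004d2(%rip),%rax
       fixed-up copy:   48 8b 81 d2 04 20 00    mov 0x2004d2(%rcx),%rax

   with %rcx temporarily loaded with FROM + insn_length, so the effective
   address is unchanged even though the copy executes at TO.  */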
1111 fixup_displaced_copy (struct gdbarch
*gdbarch
,
1112 struct displaced_step_closure
*dsc
,
1113 CORE_ADDR from
, CORE_ADDR to
, struct regcache
*regs
)
1115 const struct amd64_insn
*details
= &dsc
->insn_details
;
1117 if (details
->modrm_offset
!= -1)
1119 gdb_byte modrm
= details
->raw_insn
[details
->modrm_offset
];
1121 if ((modrm
& 0xc7) == 0x05)
1123 /* The insn uses rip-relative addressing.
1125 fixup_riprel (gdbarch
, dsc
, from
, to
, regs
);
1130 struct displaced_step_closure
*
1131 amd64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
1132 CORE_ADDR from
, CORE_ADDR to
,
1133 struct regcache
*regs
)
1135 int len
= gdbarch_max_insn_length (gdbarch
);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
1138 int fixup_sentinel_space
= len
;
1139 struct displaced_step_closure
*dsc
=
1140 xmalloc (sizeof (*dsc
) + len
+ fixup_sentinel_space
);
1141 gdb_byte
*buf
= &dsc
->insn_buf
[0];
1142 struct amd64_insn
*details
= &dsc
->insn_details
;
1145 dsc
->max_len
= len
+ fixup_sentinel_space
;
1147 read_memory (from
, buf
, len
);
1149 /* Set up the sentinel space so we don't have to worry about running
1150 off the end of the buffer. An excessive number of leading prefixes
1151 could otherwise cause this. */
1152 memset (buf
+ len
, 0, fixup_sentinel_space
);
1154 amd64_get_insn_details (buf
, details
);
1156 /* GDB may get control back after the insn after the syscall.
1157 Presumably this is a kernel bug.
1158 If this is a syscall, make sure there's a nop afterwards. */
1162 if (amd64_syscall_p (details
, &syscall_length
))
1163 buf
[details
->opcode_offset
+ syscall_length
] = NOP_OPCODE
;
1166 /* Modify the insn to cope with the address where it will be executed from.
1167 In particular, handle any rip-relative addressing. */
1168 fixup_displaced_copy (gdbarch
, dsc
, from
, to
, regs
);
1170 write_memory (to
, buf
, len
);
1172 if (debug_displaced
)
1174 fprintf_unfiltered (gdb_stdlog
, "displaced: copy %s->%s: ",
1175 paddress (gdbarch
, from
), paddress (gdbarch
, to
));
1176 displaced_step_dump_bytes (gdb_stdlog
, buf
, len
);
1183 amd64_absolute_jmp_p (const struct amd64_insn
*details
)
1185 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1187 if (insn
[0] == 0xff)
1189 /* jump near, absolute indirect (/4) */
1190 if ((insn
[1] & 0x38) == 0x20)
1193 /* jump far, absolute indirect (/5) */
1194 if ((insn
[1] & 0x38) == 0x28)
1202 amd64_absolute_call_p (const struct amd64_insn
*details
)
1204 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1206 if (insn
[0] == 0xff)
1208 /* Call near, absolute indirect (/2) */
1209 if ((insn
[1] & 0x38) == 0x10)
1212 /* Call far, absolute indirect (/3) */
1213 if ((insn
[1] & 0x38) == 0x18)
1221 amd64_ret_p (const struct amd64_insn
*details
)
1223 /* NOTE: gcc can emit "repz ; ret". */
1224 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1228 case 0xc2: /* ret near, pop N bytes */
1229 case 0xc3: /* ret near */
1230 case 0xca: /* ret far, pop N bytes */
1231 case 0xcb: /* ret far */
1232 case 0xcf: /* iret */
1241 amd64_call_p (const struct amd64_insn
*details
)
1243 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1245 if (amd64_absolute_call_p (details
))
1248 /* call near, relative */
1249 if (insn
[0] == 0xe8)
1255 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1256 length in bytes. Otherwise, return zero. */
1259 amd64_syscall_p (const struct amd64_insn
*details
, int *lengthp
)
1261 const gdb_byte
*insn
= &details
->raw_insn
[details
->opcode_offset
];
1263 if (insn
[0] == 0x0f && insn
[1] == 0x05)
1272 /* Fix up the state of registers and memory after having single-stepped
1273 a displaced instruction. */
1276 amd64_displaced_step_fixup (struct gdbarch
*gdbarch
,
1277 struct displaced_step_closure
*dsc
,
1278 CORE_ADDR from
, CORE_ADDR to
,
1279 struct regcache
*regs
)
1281 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1282 /* The offset we applied to the instruction's address. */
1283 ULONGEST insn_offset
= to
- from
;
1284 gdb_byte
*insn
= dsc
->insn_buf
;
1285 const struct amd64_insn
*insn_details
= &dsc
->insn_details
;
1287 if (debug_displaced
)
1288 fprintf_unfiltered (gdb_stdlog
,
1289 "displaced: fixup (%s, %s), "
1290 "insn = 0x%02x 0x%02x ...\n",
1291 paddress (gdbarch
, from
), paddress (gdbarch
, to
),
1294 /* If we used a tmp reg, restore it. */
1298 if (debug_displaced
)
1299 fprintf_unfiltered (gdb_stdlog
, "displaced: restoring reg %d to %s\n",
1300 dsc
->tmp_regno
, paddress (gdbarch
, dsc
->tmp_save
));
1301 regcache_cooked_write_unsigned (regs
, dsc
->tmp_regno
, dsc
->tmp_save
);
1304 /* The list of issues to contend with here is taken from
1305 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1306 Yay for Free Software! */
1308 /* Relocate the %rip back to the program's instruction stream,
1311 /* Except in the case of absolute or indirect jump or call
1312 instructions, or a return instruction, the new rip is relative to
1313 the displaced instruction; make it relative to the original insn.
1314 Well, signal handler returns don't need relocation either, but we use the
1315 value of %rip to recognize those; see below. */
1316 if (! amd64_absolute_jmp_p (insn_details
)
1317 && ! amd64_absolute_call_p (insn_details
)
1318 && ! amd64_ret_p (insn_details
))
1323 regcache_cooked_read_unsigned (regs
, AMD64_RIP_REGNUM
, &orig_rip
);
1325 /* A signal trampoline system call changes the %rip, resuming
1326 execution of the main program after the signal handler has
1327 returned. That makes them like 'return' instructions; we
1328 shouldn't relocate %rip.
1330 But most system calls don't, and we do need to relocate %rip.
	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
1339 if (amd64_syscall_p (insn_details
, &insn_len
)
1340 && orig_rip
!= to
+ insn_len
1341 /* GDB can get control back after the insn after the syscall.
1342 Presumably this is a kernel bug.
	     Fixup ensures it's a nop; we add one to the length for it.  */
1344 && orig_rip
!= to
+ insn_len
+ 1)
1346 if (debug_displaced
)
1347 fprintf_unfiltered (gdb_stdlog
,
1348 "displaced: syscall changed %%rip; "
1349 "not relocating\n");
1353 ULONGEST rip
= orig_rip
- insn_offset
;
	  /* If we just stepped over a breakpoint insn, we don't back up
	     the pc on purpose; this is to match behaviour without
	     displaced stepping.  */
1359 regcache_cooked_write_unsigned (regs
, AMD64_RIP_REGNUM
, rip
);
1361 if (debug_displaced
)
1362 fprintf_unfiltered (gdb_stdlog
,
1364 "relocated %%rip from %s to %s\n",
1365 paddress (gdbarch
, orig_rip
),
1366 paddress (gdbarch
, rip
));
  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */
1375 /* If the instruction was a call, the return address now atop the
1376 stack is the address following the copied instruction. We need
1377 to make it the address following the original instruction. */
1378 if (amd64_call_p (insn_details
))
1382 const ULONGEST retaddr_len
= 8;
1384 regcache_cooked_read_unsigned (regs
, AMD64_RSP_REGNUM
, &rsp
);
1385 retaddr
= read_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
);
1386 retaddr
= (retaddr
- insn_offset
) & 0xffffffffUL
;
1387 write_memory_unsigned_integer (rsp
, retaddr_len
, byte_order
, retaddr
);
1389 if (debug_displaced
)
1390 fprintf_unfiltered (gdb_stdlog
,
1391 "displaced: relocated return addr at %s "
1393 paddress (gdbarch
, rsp
),
1394 paddress (gdbarch
, retaddr
));
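
/* Worked example of the relocation above (illustrative numbers only): if
   the original insn was at FROM = 0x400500, the scratch copy at
   TO = 0x601000, and the step left %rip at 0x601003, then
   insn_offset = 0x200b00 and %rip is rewritten to 0x400503; a call
   additionally gets its pushed return address adjusted by the same
   offset.  */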
1398 /* The maximum number of saved registers. This should include %rip. */
1399 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1401 struct amd64_frame_cache
1405 CORE_ADDR sp_offset
;
1408 /* Saved registers. */
1409 CORE_ADDR saved_regs
[AMD64_NUM_SAVED_REGS
];
1413 /* Do we have a frame? */
1417 /* Initialize a frame cache. */
1420 amd64_init_frame_cache (struct amd64_frame_cache
*cache
)
1426 cache
->sp_offset
= -8;
  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still
     meaning "unused").  */
1434 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1435 cache
->saved_regs
[i
] = -1;
1436 cache
->saved_sp
= 0;
1437 cache
->saved_sp_reg
= -1;
1439 /* Frameless until proven otherwise. */
1440 cache
->frameless_p
= 1;
1443 /* Allocate and initialize a frame cache. */
1445 static struct amd64_frame_cache
*
1446 amd64_alloc_frame_cache (void)
1448 struct amd64_frame_cache
*cache
;
1450 cache
= FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache
);
1451 amd64_init_frame_cache (cache
);
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */
1462 amd64_analyze_stack_align (CORE_ADDR pc
, CORE_ADDR current_pc
,
1463 struct amd64_frame_cache
*cache
)
  /* There are 2 code sequences to re-align stack before the frame
     pointer is pushed.

     1. Use a caller-saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     2. Use a callee-saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */
1489 int offset
, offset_and
;
1491 if (target_read_memory (pc
, buf
, sizeof buf
))
  /* Check caller-saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
1496 if ((buf
[0] & 0xfb) == 0x48
1501 /* MOD must be binary 10 and R/M must be binary 100. */
1502 if ((buf
[2] & 0xc7) != 0x44)
1505 /* REG has register number. */
1506 reg
= (buf
[2] >> 3) & 7;
1508 /* Check the REX.R bit. */
  /* Check callee-saved register.  The first instruction
     has to be "pushq %reg".  */
1519 if ((buf
[0] & 0xf8) == 0x50)
1521 else if ((buf
[0] & 0xf6) == 0x40
1522 && (buf
[1] & 0xf8) == 0x50)
1524 /* Check the REX.B bit. */
1525 if ((buf
[0] & 1) != 0)
1534 reg
+= buf
[offset
] & 0x7;
1538 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1539 if ((buf
[offset
] & 0xfb) != 0x48
1540 || buf
[offset
+ 1] != 0x8d
1541 || buf
[offset
+ 3] != 0x24
1542 || buf
[offset
+ 4] != 0x10)
1545 /* MOD must be binary 10 and R/M must be binary 100. */
1546 if ((buf
[offset
+ 2] & 0xc7) != 0x44)
1549 /* REG has register number. */
1550 r
= (buf
[offset
+ 2] >> 3) & 7;
1552 /* Check the REX.R bit. */
1553 if (buf
[offset
] == 0x4c)
1556 /* Registers in pushq and leaq have to be the same. */
  /* Register can't be %rsp or %rbp.  */
1564 if (reg
== 4 || reg
== 5)
1567 /* The next instruction has to be "andq $-XXX, %rsp". */
1568 if (buf
[offset
] != 0x48
1569 || buf
[offset
+ 2] != 0xe4
1570 || (buf
[offset
+ 1] != 0x81 && buf
[offset
+ 1] != 0x83))
1573 offset_and
= offset
;
1574 offset
+= buf
[offset
+ 1] == 0x81 ? 7 : 4;
1576 /* The next instruction has to be "pushq -8(%reg)". */
1578 if (buf
[offset
] == 0xff)
1580 else if ((buf
[offset
] & 0xf6) == 0x40
1581 && buf
[offset
+ 1] == 0xff)
1583 /* Check the REX.B bit. */
1584 if ((buf
[offset
] & 0x1) != 0)
  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary 01.  */
1593 if (buf
[offset
+ 1] != 0xf8
1594 || (buf
[offset
] & 0xf8) != 0x70)
1597 /* R/M has register. */
1598 r
+= buf
[offset
] & 7;
1600 /* Registers in leaq and pushq have to be the same. */
1604 if (current_pc
> pc
+ offset_and
)
1605 cache
->saved_sp_reg
= amd64_arch_reg_to_regnum (reg
);
1607 return min (pc
+ offset
+ 2, current_pc
);
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */
1623 amd64_analyze_prologue (struct gdbarch
*gdbarch
,
1624 CORE_ADDR pc
, CORE_ADDR current_pc
,
1625 struct amd64_frame_cache
*cache
)
1627 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1628 static gdb_byte proto
[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1632 if (current_pc
<= pc
)
1635 pc
= amd64_analyze_stack_align (pc
, current_pc
, cache
);
1637 op
= read_memory_unsigned_integer (pc
, 1, byte_order
);
1639 if (op
== 0x55) /* pushq %rbp */
1641 /* Take into account that we've executed the `pushq %rbp' that
1642 starts this instruction sequence. */
1643 cache
->saved_regs
[AMD64_RBP_REGNUM
] = 0;
1644 cache
->sp_offset
+= 8;
1646 /* If that's all, return now. */
1647 if (current_pc
<= pc
+ 1)
1650 /* Check for `movq %rsp, %rbp'. */
1651 read_memory (pc
+ 1, buf
, 3);
1652 if (memcmp (buf
, proto
, 3) != 0)
1655 /* OK, we actually have a frame. */
1656 cache
->frameless_p
= 0;
1663 /* Return PC of first real instruction. */
1666 amd64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR start_pc
)
1668 struct amd64_frame_cache cache
;
1671 amd64_init_frame_cache (&cache
);
1672 pc
= amd64_analyze_prologue (gdbarch
, start_pc
, 0xffffffffffffffffLL
,
1674 if (cache
.frameless_p
)
1681 /* Normal frames. */
1683 static struct amd64_frame_cache
*
1684 amd64_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
1686 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1687 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1688 struct amd64_frame_cache
*cache
;
1695 cache
= amd64_alloc_frame_cache ();
1696 *this_cache
= cache
;
1698 cache
->pc
= get_frame_func (this_frame
);
1700 amd64_analyze_prologue (gdbarch
, cache
->pc
, get_frame_pc (this_frame
),
1703 if (cache
->saved_sp_reg
!= -1)
1705 /* Stack pointer has been saved. */
1706 get_frame_register (this_frame
, cache
->saved_sp_reg
, buf
);
1707 cache
->saved_sp
= extract_unsigned_integer(buf
, 8, byte_order
);
1710 if (cache
->frameless_p
)
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere halfway through its prologue, the
	 function's frame probably hasn't been fully set up yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 heuristic works quite well.  */
->saved_sp_reg
!= -1)
1721 /* We're halfway aligning the stack. */
1722 cache
->base
= ((cache
->saved_sp
- 8) & 0xfffffffffffffff0LL
) - 8;
1723 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->saved_sp
- 8;
1725 /* This will be added back below. */
1726 cache
->saved_regs
[AMD64_RIP_REGNUM
] -= cache
->base
;
1730 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
1731 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
)
1737 get_frame_register (this_frame
, AMD64_RBP_REGNUM
, buf
);
1738 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
);
1741 /* Now that we have the base address for the stack frame we can
1742 calculate the value of %rsp in the calling frame. */
1743 cache
->saved_sp
= cache
->base
+ 16;
1745 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1746 frame we find it at the same offset from the reconstructed base
1747 address. If we're halfway aligning the stack, %rip is handled
1748 differently (see above). */
1749 if (!cache
->frameless_p
|| cache
->saved_sp_reg
== -1)
1750 cache
->saved_regs
[AMD64_RIP_REGNUM
] = 8;
1752 /* Adjust all the saved registers such that they contain addresses
1753 instead of offsets. */
1754 for (i
= 0; i
< AMD64_NUM_SAVED_REGS
; i
++)
1755 if (cache
->saved_regs
[i
] != -1)
1756 cache
->saved_regs
[i
] += cache
->base
;
1762 amd64_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1763 struct frame_id
*this_id
)
1765 struct amd64_frame_cache
*cache
=
1766 amd64_frame_cache (this_frame
, this_cache
);
1768 /* This marks the outermost frame. */
1769 if (cache
->base
== 0)
1772 (*this_id
) = frame_id_build (cache
->base
+ 16, cache
->pc
);
1775 static struct value
*
1776 amd64_frame_prev_register (struct frame_info
*this_frame
, void **this_cache
,
1779 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1780 struct amd64_frame_cache
*cache
=
1781 amd64_frame_cache (this_frame
, this_cache
);
1783 gdb_assert (regnum
>= 0);
1785 if (regnum
== gdbarch_sp_regnum (gdbarch
) && cache
->saved_sp
)
1786 return frame_unwind_got_constant (this_frame
, regnum
, cache
->saved_sp
);
1788 if (regnum
< AMD64_NUM_SAVED_REGS
&& cache
->saved_regs
[regnum
] != -1)
1789 return frame_unwind_got_memory (this_frame
, regnum
,
1790 cache
->saved_regs
[regnum
]);
1792 return frame_unwind_got_register (this_frame
, regnum
, regnum
);
1795 static const struct frame_unwind amd64_frame_unwind
=
1798 amd64_frame_this_id
,
1799 amd64_frame_prev_register
,
1801 default_frame_sniffer
1805 /* Signal trampolines. */
1807 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1808 64-bit variants. This would require using identical frame caches
1809 on both platforms. */
1811 static struct amd64_frame_cache
*
1812 amd64_sigtramp_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
1814 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1815 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1816 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1817 struct amd64_frame_cache
*cache
;
1825 cache
= amd64_alloc_frame_cache ();
1827 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
1828 cache
->base
= extract_unsigned_integer (buf
, 8, byte_order
) - 8;
1830 addr
= tdep
->sigcontext_addr (this_frame
);
1831 gdb_assert (tdep
->sc_reg_offset
);
1832 gdb_assert (tdep
->sc_num_regs
<= AMD64_NUM_SAVED_REGS
);
1833 for (i
= 0; i
< tdep
->sc_num_regs
; i
++)
1834 if (tdep
->sc_reg_offset
[i
] != -1)
1835 cache
->saved_regs
[i
] = addr
+ tdep
->sc_reg_offset
[i
];
1837 *this_cache
= cache
;
1842 amd64_sigtramp_frame_this_id (struct frame_info
*this_frame
,
1843 void **this_cache
, struct frame_id
*this_id
)
1845 struct amd64_frame_cache
*cache
=
1846 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
1848 (*this_id
) = frame_id_build (cache
->base
+ 16, get_frame_pc (this_frame
));
1851 static struct value
*
1852 amd64_sigtramp_frame_prev_register (struct frame_info
*this_frame
,
1853 void **this_cache
, int regnum
)
1855 /* Make sure we've initialized the cache. */
1856 amd64_sigtramp_frame_cache (this_frame
, this_cache
);
1858 return amd64_frame_prev_register (this_frame
, this_cache
, regnum
);
1862 amd64_sigtramp_frame_sniffer (const struct frame_unwind
*self
,
1863 struct frame_info
*this_frame
,
1866 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
1870 if (tdep
->sigcontext_addr
== NULL
)
1873 if (tdep
->sigtramp_p
!= NULL
)
1875 if (tdep
->sigtramp_p (this_frame
))
1879 if (tdep
->sigtramp_start
!= 0)
1881 CORE_ADDR pc
= get_frame_pc (this_frame
);
1883 gdb_assert (tdep
->sigtramp_end
!= 0);
1884 if (pc
>= tdep
->sigtramp_start
&& pc
< tdep
->sigtramp_end
)
1891 static const struct frame_unwind amd64_sigtramp_frame_unwind
=
1894 amd64_sigtramp_frame_this_id
,
1895 amd64_sigtramp_frame_prev_register
,
1897 amd64_sigtramp_frame_sniffer
1902 amd64_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
1904 struct amd64_frame_cache
*cache
=
1905 amd64_frame_cache (this_frame
, this_cache
);
1910 static const struct frame_base amd64_frame_base
=
1912 &amd64_frame_unwind
,
1913 amd64_frame_base_address
,
1914 amd64_frame_base_address
,
1915 amd64_frame_base_address
1918 /* Normal frames, but in a function epilogue. */
/* The epilogue is defined here as the 'ret' instruction, which will
   follow any instruction such as 'leave' or 'popq %rbp' that destroys
   the function's stack frame.  */
1925 amd64_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
1929 if (target_read_memory (pc
, &insn
, 1))
1930 return 0; /* Can't read memory at pc. */
1932 if (insn
!= 0xc3) /* 'ret' instruction. */
1939 amd64_epilogue_frame_sniffer (const struct frame_unwind
*self
,
1940 struct frame_info
*this_frame
,
1941 void **this_prologue_cache
)
1943 if (frame_relative_level (this_frame
) == 0)
1944 return amd64_in_function_epilogue_p (get_frame_arch (this_frame
),
1945 get_frame_pc (this_frame
));
1950 static struct amd64_frame_cache
*
1951 amd64_epilogue_frame_cache (struct frame_info
*this_frame
, void **this_cache
)
1953 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1954 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1955 struct amd64_frame_cache
*cache
;
1961 cache
= amd64_alloc_frame_cache ();
1962 *this_cache
= cache
;
  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
1965 get_frame_register (this_frame
, AMD64_RSP_REGNUM
, buf
);
1966 cache
->base
= extract_unsigned_integer (buf
, 8,
1967 byte_order
) + cache
->sp_offset
;
1969 /* Cache pc will be the frame func. */
1970 cache
->pc
= get_frame_pc (this_frame
);
  /* The saved %rsp will be at cache->base plus 16.  */
1973 cache
->saved_sp
= cache
->base
+ 16;
  /* The saved %rip will be at cache->base plus 8.  */
1976 cache
->saved_regs
[AMD64_RIP_REGNUM
] = cache
->base
+ 8;
1982 amd64_epilogue_frame_this_id (struct frame_info
*this_frame
,
1984 struct frame_id
*this_id
)
1986 struct amd64_frame_cache
*cache
= amd64_epilogue_frame_cache (this_frame
,
1989 (*this_id
) = frame_id_build (cache
->base
+ 8, cache
->pc
);
1992 static const struct frame_unwind amd64_epilogue_frame_unwind
=
1995 amd64_epilogue_frame_this_id
,
1996 amd64_frame_prev_register
,
1998 amd64_epilogue_frame_sniffer
2001 static struct frame_id
2002 amd64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
2006 fp
= get_frame_register_unsigned (this_frame
, AMD64_RBP_REGNUM
);
2008 return frame_id_build (fp
+ 16, get_frame_pc (this_frame
));
/* 16 byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR) 16;
}
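
/* For example, an incoming SP of 0x7fffffffe468 is aligned down to
   0x7fffffffe460, satisfying the psABI requirement that the end of
   the argument area be a multiple of 16.  */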
2020 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2021 in the floating-point register set REGSET to register cache
2022 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2025 amd64_supply_fpregset (const struct regset
*regset
, struct regcache
*regcache
,
2026 int regnum
, const void *fpregs
, size_t len
)
2028 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (regset
->arch
);
2030 gdb_assert (len
== tdep
->sizeof_fpregset
);
2031 amd64_supply_fxsave (regcache
, regnum
, fpregs
);
2034 /* Collect register REGNUM from the register cache REGCACHE and store
2035 it in the buffer specified by FPREGS and LEN as described by the
2036 floating-point register set REGSET. If REGNUM is -1, do this for
2037 all registers in REGSET. */
2040 amd64_collect_fpregset (const struct regset
*regset
,
2041 const struct regcache
*regcache
,
2042 int regnum
, void *fpregs
, size_t len
)
2044 const struct gdbarch_tdep
*tdep
= gdbarch_tdep (regset
->arch
);
2046 gdb_assert (len
== tdep
->sizeof_fpregset
);
2047 amd64_collect_fxsave (regcache
, regnum
, fpregs
);
2050 /* Return the appropriate register set for the core section identified
2051 by SECT_NAME and SECT_SIZE. */
2053 static const struct regset
*
2054 amd64_regset_from_core_section (struct gdbarch
*gdbarch
,
2055 const char *sect_name
, size_t sect_size
)
2057 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2059 if (strcmp (sect_name
, ".reg2") == 0 && sect_size
== tdep
->sizeof_fpregset
)
2061 if (tdep
->fpregset
== NULL
)
2062 tdep
->fpregset
= regset_alloc (gdbarch
, amd64_supply_fpregset
,
2063 amd64_collect_fpregset
);
2065 return tdep
->fpregset
;
2068 return i386_regset_from_core_section (gdbarch
, sect_name
, sect_size
);
/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
   from which we extract the address that we will land at.  This
   address is copied into PC.  This routine returns non-zero on
   success.  */
2079 amd64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2083 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2084 int jb_pc_offset
= gdbarch_tdep (gdbarch
)->jb_pc_offset
;
2085 int len
= TYPE_LENGTH (builtin_type (gdbarch
)->builtin_func_ptr
);
2087 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2088 longjmp will land. */
2089 if (jb_pc_offset
== -1)
2092 get_frame_register (frame
, AMD64_RDI_REGNUM
, buf
);
2093 jb_addr
= extract_typed_address
2094 (buf
, builtin_type (gdbarch
)->builtin_data_ptr
);
2095 if (target_read_memory (jb_addr
+ jb_pc_offset
, buf
, len
))
2098 *pc
= extract_typed_address (buf
, builtin_type (gdbarch
)->builtin_func_ptr
);
2103 static const int amd64_record_regmap
[] =
2105 AMD64_RAX_REGNUM
, AMD64_RCX_REGNUM
, AMD64_RDX_REGNUM
, AMD64_RBX_REGNUM
,
2106 AMD64_RSP_REGNUM
, AMD64_RBP_REGNUM
, AMD64_RSI_REGNUM
, AMD64_RDI_REGNUM
,
2107 AMD64_R8_REGNUM
, AMD64_R9_REGNUM
, AMD64_R10_REGNUM
, AMD64_R11_REGNUM
,
2108 AMD64_R12_REGNUM
, AMD64_R13_REGNUM
, AMD64_R14_REGNUM
, AMD64_R15_REGNUM
,
2109 AMD64_RIP_REGNUM
, AMD64_EFLAGS_REGNUM
, AMD64_CS_REGNUM
, AMD64_SS_REGNUM
,
2110 AMD64_DS_REGNUM
, AMD64_ES_REGNUM
, AMD64_FS_REGNUM
, AMD64_GS_REGNUM
2114 amd64_init_abi (struct gdbarch_info info
, struct gdbarch
*gdbarch
)
2116 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2117 const struct target_desc
*tdesc
= info
.target_desc
;
2119 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2120 floating-point registers. */
2121 tdep
->sizeof_fpregset
= I387_SIZEOF_FXSAVE
;
2123 if (! tdesc_has_registers (tdesc
))
2124 tdesc
= tdesc_amd64
;
2125 tdep
->tdesc
= tdesc
;
2127 tdep
->num_core_regs
= AMD64_NUM_GREGS
+ I387_NUM_REGS
;
2128 tdep
->register_names
= amd64_register_names
;
2130 /* AMD64 has an FPU and 16 SSE registers. */
2131 tdep
->st0_regnum
= AMD64_ST0_REGNUM
;
2132 tdep
->num_xmm_regs
= 16;
2134 /* This is what all the fuss is about. */
2135 set_gdbarch_long_bit (gdbarch
, 64);
2136 set_gdbarch_long_long_bit (gdbarch
, 64);
2137 set_gdbarch_ptr_bit (gdbarch
, 64);
2139 /* In contrast to the i386, on AMD64 a `long double' actually takes
2140 up 128 bits, even though it's still based on the i387 extended
2141 floating-point format which has only 80 significant bits. */
2142 set_gdbarch_long_double_bit (gdbarch
, 128);
2144 set_gdbarch_num_regs (gdbarch
, AMD64_NUM_REGS
);
2146 /* Register numbers of various important registers. */
2147 set_gdbarch_sp_regnum (gdbarch
, AMD64_RSP_REGNUM
); /* %rsp */
2148 set_gdbarch_pc_regnum (gdbarch
, AMD64_RIP_REGNUM
); /* %rip */
2149 set_gdbarch_ps_regnum (gdbarch
, AMD64_EFLAGS_REGNUM
); /* %eflags */
2150 set_gdbarch_fp0_regnum (gdbarch
, AMD64_ST0_REGNUM
); /* %st(0) */
2152 /* The "default" register numbering scheme for AMD64 is referred to
2153 as the "DWARF Register Number Mapping" in the System V psABI.
2154 The preferred debugging format for all known AMD64 targets is
2155 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2156 DWARF-1), but we provide the same mapping just in case. This
2157 mapping is also used for stabs, which GCC does support. */
2158 set_gdbarch_stab_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
2159 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, amd64_dwarf_reg_to_regnum
);
2161 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2162 be in use on any of the supported AMD64 targets. */
2164 /* Call dummy code. */
2165 set_gdbarch_push_dummy_call (gdbarch
, amd64_push_dummy_call
);
2166 set_gdbarch_frame_align (gdbarch
, amd64_frame_align
);
2167 set_gdbarch_frame_red_zone_size (gdbarch
, 128);
2168 tdep
->call_dummy_num_integer_regs
=
2169 ARRAY_SIZE (amd64_dummy_call_integer_regs
);
2170 tdep
->call_dummy_integer_regs
= amd64_dummy_call_integer_regs
;
2171 tdep
->classify
= amd64_classify
;
2173 set_gdbarch_convert_register_p (gdbarch
, i387_convert_register_p
);
2174 set_gdbarch_register_to_value (gdbarch
, i387_register_to_value
);
2175 set_gdbarch_value_to_register (gdbarch
, i387_value_to_register
);
2177 set_gdbarch_return_value (gdbarch
, amd64_return_value
);
2179 set_gdbarch_skip_prologue (gdbarch
, amd64_skip_prologue
);
2181 /* Avoid wiring in the MMX registers for now. */
2182 set_gdbarch_num_pseudo_regs (gdbarch
, 0);
2183 tdep
->mm0_regnum
= -1;
2185 tdep
->record_regmap
= amd64_record_regmap
;
2187 set_gdbarch_dummy_id (gdbarch
, amd64_dummy_id
);
2189 /* Hook the function epilogue frame unwinder. This unwinder is
     appended to the list first, so that it supersedes the other
2191 unwinders in function epilogues. */
2192 frame_unwind_prepend_unwinder (gdbarch
, &amd64_epilogue_frame_unwind
);
2194 /* Hook the prologue-based frame unwinders. */
2195 frame_unwind_append_unwinder (gdbarch
, &amd64_sigtramp_frame_unwind
);
2196 frame_unwind_append_unwinder (gdbarch
, &amd64_frame_unwind
);
2197 frame_base_set_default (gdbarch
, &amd64_frame_base
);
2199 /* If we have a register mapping, enable the generic core file support. */
2200 if (tdep
->gregset_reg_offset
)
2201 set_gdbarch_regset_from_core_section (gdbarch
,
2202 amd64_regset_from_core_section
);
2204 set_gdbarch_get_longjmp_target (gdbarch
, amd64_get_longjmp_target
);
2207 /* Provide a prototype to silence -Wmissing-prototypes. */
2208 void _initialize_amd64_tdep (void);
2211 _initialize_amd64_tdep (void)
2213 initialize_tdesc_amd64 ();
/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   selector).  */
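
/* Sketch of the relevant part of the 64-bit FXSAVE area (byte offsets),
   which the code below relies on: the FPU instruction pointer occupies
   offsets 8-15 and the FPU operand (data) pointer offsets 16-23, so the
   upper halves handled below live at offsets 12 and 20 respectively.  */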
2224 /* Fill register REGNUM in REGCACHE with the appropriate
2225 floating-point or SSE register value from *FXSAVE. If REGNUM is
2226 -1, do this for all registers. This function masks off any of the
2227 reserved bits in *FXSAVE. */
2230 amd64_supply_fxsave (struct regcache
*regcache
, int regnum
,
2233 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2234 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2236 i387_supply_fxsave (regcache
, regnum
, fxsave
);
2238 if (fxsave
&& gdbarch_ptr_bit (gdbarch
) == 64)
2240 const gdb_byte
*regs
= fxsave
;
2242 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
2243 regcache_raw_supply (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
2244 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
2245 regcache_raw_supply (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);
/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */
2255 amd64_collect_fxsave (const struct regcache
*regcache
, int regnum
,
2258 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2259 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2260 gdb_byte
*regs
= fxsave
;
2262 i387_collect_fxsave (regcache
, regnum
, fxsave
);
2264 if (gdbarch_ptr_bit (gdbarch
) == 64)
2266 if (regnum
== -1 || regnum
== I387_FISEG_REGNUM (tdep
))
2267 regcache_raw_collect (regcache
, I387_FISEG_REGNUM (tdep
), regs
+ 12);
2268 if (regnum
== -1 || regnum
== I387_FOSEG_REGNUM (tdep
))
2269 regcache_raw_collect (regcache
, I387_FOSEG_REGNUM (tdep
), regs
+ 20);