Enable XML target descriptions for x86.
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "opcode/i386.h"
25 #include "dis-asm.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 #include "features/i386/amd64.c"
46
47 /* Note that the AMD64 architecture was previously known as x86-64.
48 The latter is (forever) engraved into the canonical system name as
49 returned by config.guess, and used as the name for the AMD64 port
50 of GNU/Linux. The BSD's have renamed their ports to amd64; they
51 don't like to shout. For GDB we prefer the amd64_-prefix over the
52 x86_64_-prefix since it's so much easier to type. */
53
/* Register information.  */

/* Names of the raw registers, in GDB register-number order:
   general-purpose registers first, then %rip/%eflags plus the segment
   registers, then the x87 stack and control registers, and finally
   the SSE registers and %mxcsr.  The index of a name in this array is
   the GDB register number.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
76
/* The registers used to pass integer arguments during a function call,
   in psABI argument order (first argument first).  */
static int amd64_dummy_call_integer_regs[] =
{
  AMD64_RDI_REGNUM,		/* %rdi */
  AMD64_RSI_REGNUM,		/* %rsi */
  AMD64_RDX_REGNUM,		/* %rdx */
  AMD64_RCX_REGNUM,		/* %rcx */
  8,				/* %r8 */
  9				/* %r9 */
};
87
88 /* DWARF Register Number Mapping as defined in the System V psABI,
89 section 3.6. */
90
91 static int amd64_dwarf_regmap[] =
92 {
93 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
94 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
95 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
96 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
97
98 /* Frame Pointer Register RBP. */
99 AMD64_RBP_REGNUM,
100
101 /* Stack Pointer Register RSP. */
102 AMD64_RSP_REGNUM,
103
104 /* Extended Integer Registers 8 - 15. */
105 8, 9, 10, 11, 12, 13, 14, 15,
106
107 /* Return Address RA. Mapped to RIP. */
108 AMD64_RIP_REGNUM,
109
110 /* SSE Registers 0 - 7. */
111 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
112 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
113 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
114 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
115
116 /* Extended SSE Registers 8 - 15. */
117 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
118 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
119 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
120 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
121
122 /* Floating Point Registers 0-7. */
123 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
124 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
125 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
126 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
127
128 /* Control and Status Flags Register. */
129 AMD64_EFLAGS_REGNUM,
130
131 /* Selector Registers. */
132 AMD64_ES_REGNUM,
133 AMD64_CS_REGNUM,
134 AMD64_SS_REGNUM,
135 AMD64_DS_REGNUM,
136 AMD64_FS_REGNUM,
137 AMD64_GS_REGNUM,
138 -1,
139 -1,
140
141 /* Segment Base Address Registers. */
142 -1,
143 -1,
144 -1,
145 -1,
146
147 /* Special Selector Registers. */
148 -1,
149 -1,
150
151 /* Floating Point Control Registers. */
152 AMD64_MXCSR_REGNUM,
153 AMD64_FCTRL_REGNUM,
154 AMD64_FSTAT_REGNUM
155 };
156
157 static const int amd64_dwarf_regmap_len =
158 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
159
160 /* Convert DWARF register number REG to the appropriate register
161 number used by GDB. */
162
163 static int
164 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
165 {
166 int regnum = -1;
167
168 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
169 regnum = amd64_dwarf_regmap[reg];
170
171 if (regnum == -1)
172 warning (_("Unmapped DWARF Register #%d encountered."), reg);
173
174 return regnum;
175 }
176
/* Map architectural register numbers to gdb register numbers.  The
   index is the x86-64 encoding of the register (e.g. %rdi = 7).  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,		/* %rax */
  AMD64_RCX_REGNUM,		/* %rcx */
  AMD64_RDX_REGNUM,		/* %rdx */
  AMD64_RBX_REGNUM,		/* %rbx */
  AMD64_RSP_REGNUM,		/* %rsp */
  AMD64_RBP_REGNUM,		/* %rbp */
  AMD64_RSI_REGNUM,		/* %rsi */
  AMD64_RDI_REGNUM,		/* %rdi */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM		/* %r15 */
};

/* Number of entries in the architectural register map above.  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
201
/* Convert architectural register number REG (0..15, x86-64 encoding)
   to the appropriate register number used by GDB.  Asserts that REG
   is within range.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
212
213 \f
214
215 /* Return the union class of CLASS1 and CLASS2. See the psABI for
216 details. */
217
218 static enum amd64_reg_class
219 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
220 {
221 /* Rule (a): If both classes are equal, this is the resulting class. */
222 if (class1 == class2)
223 return class1;
224
225 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
226 is the other class. */
227 if (class1 == AMD64_NO_CLASS)
228 return class2;
229 if (class2 == AMD64_NO_CLASS)
230 return class1;
231
232 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
233 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
234 return AMD64_MEMORY;
235
236 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
237 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
238 return AMD64_INTEGER;
239
240 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
241 MEMORY is used as class. */
242 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
243 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
244 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
245 return AMD64_MEMORY;
246
247 /* Rule (f): Otherwise class SSE is used. */
248 return AMD64_SSE;
249 }
250
251 /* Return non-zero if TYPE is a non-POD structure or union type. */
252
253 static int
254 amd64_non_pod_p (struct type *type)
255 {
256 /* ??? A class with a base class certainly isn't POD, but does this
257 catch all non-POD structure types? */
258 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
259 return 1;
260
261 return 0;
262 }
263
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  CLASS[0]
   describes the first eightbyte of the object, CLASS[1] the second;
   objects that fit in 8 bytes leave CLASS[1] as AMD64_NO_CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte:  */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  Classify the
	 element type once, and copy its class into the second
	 eightbyte if the array spills past 8 bytes.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* POS is the eightbyte in which the field starts, ENDPOS
	     the eightbyte in which it ends.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  /* A zero bitsize means the field is not a bitfield; use
	     the full size of its type.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
369
/* Classify TYPE, and store the result in CLASS.  CLASS[0] receives
   the class of the first eightbyte, CLASS[1] the class of the second;
   both default to AMD64_NO_CLASS for types this function does not
   recognize (e.g. TYPE_CODE_VOID).  */

void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
417
/* Implement the "return_value" gdbarch method for the System V psABI.

   Classify TYPE and either read (READBUF non-NULL) or write (WRITEBUF
   non-NULL) the return value from/to the appropriate registers in
   REGCACHE.  Returns RETURN_VALUE_ABI_RETURNS_ADDRESS for MEMORY-class
   values (caller-allocated storage, address returned in %rax), and
   RETURN_VALUE_REGISTER_CONVENTION otherwise.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  /* Register sequences for INTEGER and SSE return values,
     respectively; one register per eightbyte.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));
  gdb_assert (tdep->classify);

  /* 1. Classify the return type with the classification algorithm.  */
  tdep->classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function. In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Walk the value one eightbyte at a time, picking the register
     (and possibly an offset within it) that holds each piece.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent remains to be transferred (at offset 8 of the
	     80-bit register); setting LEN to 2 makes the transfer
	     below copy exactly those bytes and ends the loop.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* Transfer at most 8 bytes of this eightbyte.  */
      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
528 \f
529
/* Push the NARGS arguments in ARGS onto the stack at SP and into the
   argument registers of REGCACHE, following the psABI classification.
   STRUCT_RETURN non-zero means the first integer register is reserved
   for the hidden return-value pointer.  Returns the new (16-byte
   aligned) stack pointer.  Also sets %al to the number of SSE
   registers used, as required for varargs callees.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int *integer_regs = tdep->call_dummy_integer_regs;
  int num_integer_regs = tdep->call_dummy_num_integer_regs;

  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  /* An array that mirrors the stack_args array.  For all arguments
     that are passed by MEMORY, if that argument's address also needs
     to be stored in a register, the ARG_ADDR_REGNO array will contain
     that register number (or a negative value otherwise).  */
  int *arg_addr_regno = alloca (nargs * sizeof (int));
  int num_stack_args = 0;	/* Number of entries used in STACK_ARGS.  */
  int num_elements = 0;		/* Total eightbytes needed on the stack.  */
  int element = 0;		/* Current eightbyte being written out.  */
  int integer_reg = 0;		/* Next free integer register index.  */
  int sse_reg = 0;		/* Next free SSE register index.  */
  int i;

  gdb_assert (tdep->classify);

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  /* First pass: decide for each argument whether it goes in
     registers (and write it there) or on the stack (and queue it).  */
  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      tdep->classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > num_integer_regs
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args] = args[i];
	  /* If this is an AMD64_MEMORY argument whose address must also
	     be passed in one of the integer registers, reserve that
	     register and associate this value to that register so that
	     we can store the argument address as soon as we know it.  */
	  if (class[0] == AMD64_MEMORY
	      && tdep->memory_args_by_pointer
	      && integer_reg < tdep->call_dummy_num_integer_regs)
	    arg_addr_regno[num_stack_args] =
	      tdep->call_dummy_integer_regs[integer_reg++];
	  else
	    arg_addr_regno[num_stack_args] = -1;
	  num_stack_args++;
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  /* Copy each eightbyte of the value into its register,
	     zero-padding the last partial eightbyte.  */
	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regs[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* Upper half of the previously used SSE register.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);
      CORE_ADDR arg_addr = sp + element * 8;

      write_memory (arg_addr, valbuf, len);
      if (arg_addr_regno[i] >= 0)
        {
          /* We also need to store the address of that argument in
             the given register.  */
          gdb_byte buf[8];
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

          store_unsigned_integer (buf, 8, byte_order, arg_addr);
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
        }
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
687
688 static CORE_ADDR
689 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
690 struct regcache *regcache, CORE_ADDR bp_addr,
691 int nargs, struct value **args, CORE_ADDR sp,
692 int struct_return, CORE_ADDR struct_addr)
693 {
694 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
695 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
696 gdb_byte buf[8];
697
698 /* Pass arguments. */
699 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
700
701 /* Pass "hidden" argument". */
702 if (struct_return)
703 {
704 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
705 /* The "hidden" argument is passed throught the first argument
706 register. */
707 const int arg_regnum = tdep->call_dummy_integer_regs[0];
708
709 store_unsigned_integer (buf, 8, byte_order, struct_addr);
710 regcache_cooked_write (regcache, arg_regnum, buf);
711 }
712
713 /* Reserve some memory on the stack for the integer-parameter registers,
714 if required by the ABI. */
715 if (tdep->integer_param_regs_saved_in_caller_frame)
716 sp -= tdep->call_dummy_num_integer_regs * 8;
717
718 /* Store return address. */
719 sp -= 8;
720 store_unsigned_integer (buf, 8, byte_order, bp_addr);
721 write_memory (sp, buf, 8);
722
723 /* Finally, update the stack pointer... */
724 store_unsigned_integer (buf, 8, byte_order, sp);
725 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
726
727 /* ...and fake a frame pointer. */
728 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
729
730 return sp + 16;
731 }
732 \f
733 /* Displaced instruction handling. */
734
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are byte offsets from the start of RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes (1, 2 or 3).  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};
752
/* State carried across a displaced-step operation for one
   instruction.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is non-zero when TMP_REGNO/TMP_SAVE hold valid data.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
770
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).

   Indexed by the one-byte opcode; 1 means the instruction has a
   ModRM byte.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
797
/* Indexed by the second opcode byte of a 0x0f-escaped instruction;
   1 means the instruction has a ModRM byte.  Keep in sync with
   ../opcodes/i386-dis.c (see the warning above onebyte_has_modrm).  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
820
821 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
822
/* Return non-zero if PFX is a REX instruction prefix
   (thin wrapper around opcode/i386.h's REX_PREFIX_P).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
828
829 /* Skip the legacy instruction prefixes in INSN.
830 We assume INSN is properly sentineled so we don't have to worry
831 about falling off the end of the buffer. */
832
833 static gdb_byte *
834 amd64_skip_prefixes (gdb_byte *insn)
835 {
836 while (1)
837 {
838 switch (*insn)
839 {
840 case DATA_PREFIX_OPCODE:
841 case ADDR_PREFIX_OPCODE:
842 case CS_PREFIX_OPCODE:
843 case DS_PREFIX_OPCODE:
844 case ES_PREFIX_OPCODE:
845 case FS_PREFIX_OPCODE:
846 case GS_PREFIX_OPCODE:
847 case SS_PREFIX_OPCODE:
848 case LOCK_PREFIX_OPCODE:
849 case REPE_PREFIX_OPCODE:
850 case REPNE_PREFIX_OPCODE:
851 ++insn;
852 continue;
853 default:
854 break;
855 }
856 break;
857 }
858
859 return insn;
860 }
861
/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  Always reports zero characters
   written.  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}
871
/* Initialize a struct disassemble_info for amd64_insn_length.
   DI is set up to "disassemble" the MAX_LEN bytes at INSN as if they
   lived at address ADDR, printing nothing.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
			    struct disassemble_info *di,
			    const gdb_byte *insn, int max_len,
			    CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}
896
/* Return the length in bytes of INSN.
   MAX_LEN is the size of the buffer containing INSN.
   libopcodes currently doesn't export a utility to compute the
   instruction length, so use the disassembler until then.  ADDR is
   the address the instruction nominally lives at (affects
   rip-relative disassembly only, not the computed length).  */

static int
amd64_insn_length (struct gdbarch *gdbarch,
		   const gdb_byte *insn, int max_len, CORE_ADDR addr)
{
  struct disassemble_info di;

  amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);

  return gdbarch_print_insn (gdbarch, addr, &di);
}
912
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  Calls internal_error if no register is free (which the
   preceding asserts show cannot happen).  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register (in its low 3 bits).  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* mod != 3 with rm == 4 means a SIB byte follows the ModRM.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << index;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* At most 8 low registers can be marked, and at least one of the
     8 must still be free (at most 3 inputs plus the 3 reserved above).  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
986
/* Extract the details of INSN that we need, filling in DETAILS.
   INSN must already have its legacy prefixes in place; all offsets
   stored in DETAILS are relative to the start of INSN.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  /* Initialize everything to "not present".  */
  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1050
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.

   FROM is the address the instruction was fetched from; the scratch
   register chosen is recorded in DSC so amd64_displaced_step_fixup can
   restore it after the step.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  (DISP itself is left in place
     in the copied instruction; only the base changes.)  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the scratch register's old value so the fixup step can
     restore it.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: mod = 10 (disp32) with
     the scratch register as base.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Point the scratch register at the value %rip would have had.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1109
1110 static void
1111 fixup_displaced_copy (struct gdbarch *gdbarch,
1112 struct displaced_step_closure *dsc,
1113 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1114 {
1115 const struct amd64_insn *details = &dsc->insn_details;
1116
1117 if (details->modrm_offset != -1)
1118 {
1119 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1120
1121 if ((modrm & 0xc7) == 0x05)
1122 {
1123 /* The insn uses rip-relative addressing.
1124 Deal with it. */
1125 fixup_riprel (gdbarch, dsc, from, to, regs);
1126 }
1127 }
1128 }
1129
/* Copy the instruction at FROM to the scratch area at TO, adjusting it
   as necessary (rip-relative addressing, syscall quirk), and return a
   closure describing what was done so the fixup step can undo it.
   The closure is heap-allocated; the caller owns it.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  /* Read the maximum instruction length's worth of bytes; the actual
     instruction may be shorter.  */
  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1181
1182 static int
1183 amd64_absolute_jmp_p (const struct amd64_insn *details)
1184 {
1185 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1186
1187 if (insn[0] == 0xff)
1188 {
1189 /* jump near, absolute indirect (/4) */
1190 if ((insn[1] & 0x38) == 0x20)
1191 return 1;
1192
1193 /* jump far, absolute indirect (/5) */
1194 if ((insn[1] & 0x38) == 0x28)
1195 return 1;
1196 }
1197
1198 return 0;
1199 }
1200
1201 static int
1202 amd64_absolute_call_p (const struct amd64_insn *details)
1203 {
1204 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1205
1206 if (insn[0] == 0xff)
1207 {
1208 /* Call near, absolute indirect (/2) */
1209 if ((insn[1] & 0x38) == 0x10)
1210 return 1;
1211
1212 /* Call far, absolute indirect (/3) */
1213 if ((insn[1] & 0x38) == 0x18)
1214 return 1;
1215 }
1216
1217 return 0;
1218 }
1219
1220 static int
1221 amd64_ret_p (const struct amd64_insn *details)
1222 {
1223 /* NOTE: gcc can emit "repz ; ret". */
1224 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1225
1226 switch (insn[0])
1227 {
1228 case 0xc2: /* ret near, pop N bytes */
1229 case 0xc3: /* ret near */
1230 case 0xca: /* ret far, pop N bytes */
1231 case 0xcb: /* ret far */
1232 case 0xcf: /* iret */
1233 return 1;
1234
1235 default:
1236 return 0;
1237 }
1238 }
1239
1240 static int
1241 amd64_call_p (const struct amd64_insn *details)
1242 {
1243 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1244
1245 if (amd64_absolute_call_p (details))
1246 return 1;
1247
1248 /* call near, relative */
1249 if (insn[0] == 0xe8)
1250 return 1;
1251
1252 return 0;
1253 }
1254
1255 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1256 length in bytes. Otherwise, return zero. */
1257
1258 static int
1259 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1260 {
1261 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1262
1263 if (insn[0] == 0x0f && insn[1] == 0x05)
1264 {
1265 *lengthp = 2;
1266 return 1;
1267 }
1268
1269 return 0;
1270 }
1271
1272 /* Fix up the state of registers and memory after having single-stepped
1273 a displaced instruction. */
1274
1275 void
1276 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1277 struct displaced_step_closure *dsc,
1278 CORE_ADDR from, CORE_ADDR to,
1279 struct regcache *regs)
1280 {
1281 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1282 /* The offset we applied to the instruction's address. */
1283 ULONGEST insn_offset = to - from;
1284 gdb_byte *insn = dsc->insn_buf;
1285 const struct amd64_insn *insn_details = &dsc->insn_details;
1286
1287 if (debug_displaced)
1288 fprintf_unfiltered (gdb_stdlog,
1289 "displaced: fixup (%s, %s), "
1290 "insn = 0x%02x 0x%02x ...\n",
1291 paddress (gdbarch, from), paddress (gdbarch, to),
1292 insn[0], insn[1]);
1293
1294 /* If we used a tmp reg, restore it. */
1295
1296 if (dsc->tmp_used)
1297 {
1298 if (debug_displaced)
1299 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1300 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1301 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1302 }
1303
1304 /* The list of issues to contend with here is taken from
1305 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1306 Yay for Free Software! */
1307
1308 /* Relocate the %rip back to the program's instruction stream,
1309 if necessary. */
1310
1311 /* Except in the case of absolute or indirect jump or call
1312 instructions, or a return instruction, the new rip is relative to
1313 the displaced instruction; make it relative to the original insn.
1314 Well, signal handler returns don't need relocation either, but we use the
1315 value of %rip to recognize those; see below. */
1316 if (! amd64_absolute_jmp_p (insn_details)
1317 && ! amd64_absolute_call_p (insn_details)
1318 && ! amd64_ret_p (insn_details))
1319 {
1320 ULONGEST orig_rip;
1321 int insn_len;
1322
1323 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1324
1325 /* A signal trampoline system call changes the %rip, resuming
1326 execution of the main program after the signal handler has
1327 returned. That makes them like 'return' instructions; we
1328 shouldn't relocate %rip.
1329
1330 But most system calls don't, and we do need to relocate %rip.
1331
1332 Our heuristic for distinguishing these cases: if stepping
1333 over the system call instruction left control directly after
1334 the instruction, the we relocate --- control almost certainly
1335 doesn't belong in the displaced copy. Otherwise, we assume
1336 the instruction has put control where it belongs, and leave
1337 it unrelocated. Goodness help us if there are PC-relative
1338 system calls. */
1339 if (amd64_syscall_p (insn_details, &insn_len)
1340 && orig_rip != to + insn_len
1341 /* GDB can get control back after the insn after the syscall.
1342 Presumably this is a kernel bug.
1343 Fixup ensures its a nop, we add one to the length for it. */
1344 && orig_rip != to + insn_len + 1)
1345 {
1346 if (debug_displaced)
1347 fprintf_unfiltered (gdb_stdlog,
1348 "displaced: syscall changed %%rip; "
1349 "not relocating\n");
1350 }
1351 else
1352 {
1353 ULONGEST rip = orig_rip - insn_offset;
1354
1355 /* If we just stepped over a breakpoint insn, we don't backup
1356 the pc on purpose; this is to match behaviour without
1357 stepping. */
1358
1359 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1360
1361 if (debug_displaced)
1362 fprintf_unfiltered (gdb_stdlog,
1363 "displaced: "
1364 "relocated %%rip from %s to %s\n",
1365 paddress (gdbarch, orig_rip),
1366 paddress (gdbarch, rip));
1367 }
1368 }
1369
1370 /* If the instruction was PUSHFL, then the TF bit will be set in the
1371 pushed value, and should be cleared. We'll leave this for later,
1372 since GDB already messes up the TF flag when stepping over a
1373 pushfl. */
1374
1375 /* If the instruction was a call, the return address now atop the
1376 stack is the address following the copied instruction. We need
1377 to make it the address following the original instruction. */
1378 if (amd64_call_p (insn_details))
1379 {
1380 ULONGEST rsp;
1381 ULONGEST retaddr;
1382 const ULONGEST retaddr_len = 8;
1383
1384 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1385 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1386 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1387 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1388
1389 if (debug_displaced)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "displaced: relocated return addr at %s "
1392 "to %s\n",
1393 paddress (gdbarch, rsp),
1394 paddress (gdbarch, retaddr));
1395 }
1396 }
1397 \f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

/* Per-frame cache filled in by prologue analysis.  */

struct amd64_frame_cache
{
  /* Base address of the frame (0 until computed).  */
  CORE_ADDR base;
  /* Offset applied to %rsp to reconstruct BASE for frameless
     functions (see amd64_frame_cache); starts at -8 for the pushed
     return address.  */
  CORE_ADDR sp_offset;
  /* Start address of the function, from get_frame_func.  */
  CORE_ADDR pc;

  /* Saved registers.  Initially offsets relative to BASE (-1 meaning
     "not saved"); converted to absolute addresses at the end of
     amd64_frame_cache.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* The caller's %rsp.  */
  CORE_ADDR saved_sp;
  /* Register holding a copy of the pre-realignment stack pointer,
     or -1 (see amd64_analyze_stack_align).  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
1416
1417 /* Initialize a frame cache. */
1418
1419 static void
1420 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1421 {
1422 int i;
1423
1424 /* Base address. */
1425 cache->base = 0;
1426 cache->sp_offset = -8;
1427 cache->pc = 0;
1428
1429 /* Saved registers. We initialize these to -1 since zero is a valid
1430 offset (that's where %rbp is supposed to be stored).
1431 The values start out as being offsets, and are later converted to
1432 addresses (at which point -1 is interpreted as an address, still meaning
1433 "invalid"). */
1434 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1435 cache->saved_regs[i] = -1;
1436 cache->saved_sp = 0;
1437 cache->saved_sp_reg = -1;
1438
1439 /* Frameless until proven otherwise. */
1440 cache->frameless_p = 1;
1441 }
1442
1443 /* Allocate and initialize a frame cache. */
1444
1445 static struct amd64_frame_cache *
1446 amd64_alloc_frame_cache (void)
1447 {
1448 struct amd64_frame_cache *cache;
1449
1450 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1451 amd64_init_frame_cache (cache);
1452 return cache;
1453 }
1454
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Once execution has passed the "andq", the original stack pointer
     can only be recovered from the register it was copied into.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
1609
1610 /* Do a limited analysis of the prologue at PC and update CACHE
1611 accordingly. Bail out early if CURRENT_PC is reached. Return the
1612 address where the analysis stopped.
1613
1614 We will handle only functions beginning with:
1615
1616 pushq %rbp 0x55
1617 movq %rsp, %rbp 0x48 0x89 0xe5
1618
1619 Any function that doesn't start with this sequence will be assumed
1620 to have no prologue and thus no valid frame pointer in %rbp. */
1621
1622 static CORE_ADDR
1623 amd64_analyze_prologue (struct gdbarch *gdbarch,
1624 CORE_ADDR pc, CORE_ADDR current_pc,
1625 struct amd64_frame_cache *cache)
1626 {
1627 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1628 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1629 gdb_byte buf[3];
1630 gdb_byte op;
1631
1632 if (current_pc <= pc)
1633 return current_pc;
1634
1635 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1636
1637 op = read_memory_unsigned_integer (pc, 1, byte_order);
1638
1639 if (op == 0x55) /* pushq %rbp */
1640 {
1641 /* Take into account that we've executed the `pushq %rbp' that
1642 starts this instruction sequence. */
1643 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1644 cache->sp_offset += 8;
1645
1646 /* If that's all, return now. */
1647 if (current_pc <= pc + 1)
1648 return current_pc;
1649
1650 /* Check for `movq %rsp, %rbp'. */
1651 read_memory (pc + 1, buf, 3);
1652 if (memcmp (buf, proto, 3) != 0)
1653 return pc + 1;
1654
1655 /* OK, we actually have a frame. */
1656 cache->frameless_p = 0;
1657 return pc + 4;
1658 }
1659
1660 return pc;
1661 }
1662
1663 /* Return PC of first real instruction. */
1664
1665 static CORE_ADDR
1666 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1667 {
1668 struct amd64_frame_cache cache;
1669 CORE_ADDR pc;
1670
1671 amd64_init_frame_cache (&cache);
1672 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1673 &cache);
1674 if (cache.frameless_p)
1675 return start_pc;
1676
1677 return pc;
1678 }
1679 \f
1680
1681 /* Normal frames. */
1682
1683 static struct amd64_frame_cache *
1684 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1685 {
1686 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1687 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1688 struct amd64_frame_cache *cache;
1689 gdb_byte buf[8];
1690 int i;
1691
1692 if (*this_cache)
1693 return *this_cache;
1694
1695 cache = amd64_alloc_frame_cache ();
1696 *this_cache = cache;
1697
1698 cache->pc = get_frame_func (this_frame);
1699 if (cache->pc != 0)
1700 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1701 cache);
1702
1703 if (cache->saved_sp_reg != -1)
1704 {
1705 /* Stack pointer has been saved. */
1706 get_frame_register (this_frame, cache->saved_sp_reg, buf);
1707 cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
1708 }
1709
1710 if (cache->frameless_p)
1711 {
1712 /* We didn't find a valid frame. If we're at the start of a
1713 function, or somewhere half-way its prologue, the function's
1714 frame probably hasn't been fully setup yet. Try to
1715 reconstruct the base address for the stack frame by looking
1716 at the stack pointer. For truly "frameless" functions this
1717 might work too. */
1718
1719 if (cache->saved_sp_reg != -1)
1720 {
1721 /* We're halfway aligning the stack. */
1722 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1723 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1724
1725 /* This will be added back below. */
1726 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1727 }
1728 else
1729 {
1730 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1731 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1732 + cache->sp_offset;
1733 }
1734 }
1735 else
1736 {
1737 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1738 cache->base = extract_unsigned_integer (buf, 8, byte_order);
1739 }
1740
1741 /* Now that we have the base address for the stack frame we can
1742 calculate the value of %rsp in the calling frame. */
1743 cache->saved_sp = cache->base + 16;
1744
1745 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1746 frame we find it at the same offset from the reconstructed base
1747 address. If we're halfway aligning the stack, %rip is handled
1748 differently (see above). */
1749 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1750 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1751
1752 /* Adjust all the saved registers such that they contain addresses
1753 instead of offsets. */
1754 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1755 if (cache->saved_regs[i] != -1)
1756 cache->saved_regs[i] += cache->base;
1757
1758 return cache;
1759 }
1760
1761 static void
1762 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1763 struct frame_id *this_id)
1764 {
1765 struct amd64_frame_cache *cache =
1766 amd64_frame_cache (this_frame, this_cache);
1767
1768 /* This marks the outermost frame. */
1769 if (cache->base == 0)
1770 return;
1771
1772 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1773 }
1774
1775 static struct value *
1776 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1777 int regnum)
1778 {
1779 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1780 struct amd64_frame_cache *cache =
1781 amd64_frame_cache (this_frame, this_cache);
1782
1783 gdb_assert (regnum >= 0);
1784
1785 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1786 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1787
1788 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1789 return frame_unwind_got_memory (this_frame, regnum,
1790 cache->saved_regs[regnum]);
1791
1792 return frame_unwind_got_register (this_frame, regnum, regnum);
1793 }
1794
/* Unwinder for normal, prologue-analyzed frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1803 \f
1804
1805 /* Signal trampolines. */
1806
1807 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1808 64-bit variants. This would require using identical frame caches
1809 on both platforms. */
1810
/* Build (or return the memoized) frame cache for a signal-trampoline
   frame.  Saved-register addresses are taken from the signal context,
   located via the OS-specific sigcontext_addr callback and the
   sc_reg_offset table in the tdep.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

  /* Record the address of each register saved in the signal context;
     -1 in sc_reg_offset means the register isn't present there.  */
  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}
1840
1841 static void
1842 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1843 void **this_cache, struct frame_id *this_id)
1844 {
1845 struct amd64_frame_cache *cache =
1846 amd64_sigtramp_frame_cache (this_frame, this_cache);
1847
1848 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1849 }
1850
/* Implement the prev_register unwind method for signal-trampoline
   frames: populate the cache, then let the normal-frame unwinder read
   the saved registers out of it.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1860
1861 static int
1862 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1863 struct frame_info *this_frame,
1864 void **this_cache)
1865 {
1866 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1867
1868 /* We shouldn't even bother if we don't have a sigcontext_addr
1869 handler. */
1870 if (tdep->sigcontext_addr == NULL)
1871 return 0;
1872
1873 if (tdep->sigtramp_p != NULL)
1874 {
1875 if (tdep->sigtramp_p (this_frame))
1876 return 1;
1877 }
1878
1879 if (tdep->sigtramp_start != 0)
1880 {
1881 CORE_ADDR pc = get_frame_pc (this_frame);
1882
1883 gdb_assert (tdep->sigtramp_end != 0);
1884 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1885 return 1;
1886 }
1887
1888 return 0;
1889 }
1890
/* Unwinder for signal-trampoline frames; register values are read from
   the signal context saved on the stack.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
1899 \f
1900
1901 static CORE_ADDR
1902 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
1903 {
1904 struct amd64_frame_cache *cache =
1905 amd64_frame_cache (this_frame, this_cache);
1906
1907 return cache->base;
1908 }
1909
/* Frame base handler: frame base, locals base and arguments base all
   coincide with the cache's base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
1917
1918 /* Normal frames, but in a function epilogue. */
1919
1920 /* The epilogue is defined here as the 'ret' instruction, which will
1921 follow any instruction such as 'leave' or 'pop %ebp' that destroys
1922 the function's stack frame. */
1923
1924 static int
1925 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
1926 {
1927 gdb_byte insn;
1928
1929 if (target_read_memory (pc, &insn, 1))
1930 return 0; /* Can't read memory at pc. */
1931
1932 if (insn != 0xc3) /* 'ret' instruction. */
1933 return 0;
1934
1935 return 1;
1936 }
1937
/* Sniffer for epilogue frames: only the innermost frame can be sitting
   on a 'ret'.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
1949
/* Build (or return the memoized) frame cache for a frame stopped in a
   function's epilogue, where the frame pointer has already been
   destroyed but the return address is still on the stack.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
					  byte_order) + cache->sp_offset;

  /* Cache pc will be the current PC (we're sitting on the 'ret').  */
  cache->pc = get_frame_pc (this_frame);

  /* The saved %rsp will be at cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}
1980
1981 static void
1982 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
1983 void **this_cache,
1984 struct frame_id *this_id)
1985 {
1986 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
1987 this_cache);
1988
1989 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
1990 }
1991
/* Unwinder used when the PC sits on a 'ret' instruction, after the
   frame has been torn down.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2000
2001 static struct frame_id
2002 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2003 {
2004 CORE_ADDR fp;
2005
2006 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2007
2008 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2009 }
2010
2011 /* 16 byte align the SP per frame requirements. */
2012
2013 static CORE_ADDR
2014 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2015 {
2016 return sp & -(CORE_ADDR)16;
2017 }
2018 \f
2019
2020 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2021 in the floating-point register set REGSET to register cache
2022 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2023
2024 static void
2025 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2026 int regnum, const void *fpregs, size_t len)
2027 {
2028 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2029
2030 gdb_assert (len == tdep->sizeof_fpregset);
2031 amd64_supply_fxsave (regcache, regnum, fpregs);
2032 }
2033
2034 /* Collect register REGNUM from the register cache REGCACHE and store
2035 it in the buffer specified by FPREGS and LEN as described by the
2036 floating-point register set REGSET. If REGNUM is -1, do this for
2037 all registers in REGSET. */
2038
2039 static void
2040 amd64_collect_fpregset (const struct regset *regset,
2041 const struct regcache *regcache,
2042 int regnum, void *fpregs, size_t len)
2043 {
2044 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2045
2046 gdb_assert (len == tdep->sizeof_fpregset);
2047 amd64_collect_fxsave (regcache, regnum, fpregs);
2048 }
2049
2050 /* Return the appropriate register set for the core section identified
2051 by SECT_NAME and SECT_SIZE. */
2052
2053 static const struct regset *
2054 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2055 const char *sect_name, size_t sect_size)
2056 {
2057 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2058
2059 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2060 {
2061 if (tdep->fpregset == NULL)
2062 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2063 amd64_collect_fpregset);
2064
2065 return tdep->fpregset;
2066 }
2067
2068 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2069 }
2070 \f
2071
2072 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2073 %rdi. We expect its value to be a pointer to the jmp_buf structure
2074 from which we extract the address that we will land at. This
2075 address is copied into PC. This routine returns non-zero on
2076 success. */
2077
2078 static int
2079 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2080 {
2081 gdb_byte buf[8];
2082 CORE_ADDR jb_addr;
2083 struct gdbarch *gdbarch = get_frame_arch (frame);
2084 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2085 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2086
2087 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2088 longjmp will land. */
2089 if (jb_pc_offset == -1)
2090 return 0;
2091
2092 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2093 jb_addr= extract_typed_address
2094 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2095 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2096 return 0;
2097
2098 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2099
2100 return 1;
2101 }
2102
/* Register map used by the process-record support (installed as
   tdep->record_regmap in amd64_init_abi below).  Presumably indexed
   by the i386 record module's own register encoding, mapping each
   slot to the corresponding AMD64 GDB register number -- confirm
   against the record code in i386-tdep.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
2112
/* Initialize the common (OS-independent) parts of GDBARCH for the
   AMD64 architecture: data-model sizes, register numbering, calling
   convention, frame unwinders and core-file support.  OS-specific
   *-tdep files call this and then layer their own settings on top.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back to the builtin amd64 target description when the
     target did not supply one of its own.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
  tdep->call_dummy_num_integer_regs =
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
  tdep->classify = amd64_classify;

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
}
2206
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the builtin amd64 target description
   compiled in from features/i386/amd64.c (included at the top of
   this file).  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
}
2215 \f
2216
2217 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2218 sense that the instruction pointer and data pointer are simply
2219 64-bit offsets into the code segment and the data segment instead
2220 of a selector offset pair. The functions below store the upper 32
2221 bits of these pointers (instead of just the 16-bits of the segment
2222 selector). */
2223
2224 /* Fill register REGNUM in REGCACHE with the appropriate
2225 floating-point or SSE register value from *FXSAVE. If REGNUM is
2226 -1, do this for all registers. This function masks off any of the
2227 reserved bits in *FXSAVE. */
2228
2229 void
2230 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2231 const void *fxsave)
2232 {
2233 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2234 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2235
2236 i387_supply_fxsave (regcache, regnum, fxsave);
2237
2238 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2239 {
2240 const gdb_byte *regs = fxsave;
2241
2242 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2243 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2244 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2245 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2246 }
2247 }
2248
2249 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2250 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2251 all registers. This function doesn't touch any of the reserved
2252 bits in *FXSAVE. */
2253
2254 void
2255 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2256 void *fxsave)
2257 {
2258 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2259 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2260 gdb_byte *regs = fxsave;
2261
2262 i387_collect_fxsave (regcache, regnum, fxsave);
2263
2264 if (gdbarch_ptr_bit (gdbarch) == 64)
2265 {
2266 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2267 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2268 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2269 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2270 }
2271 }
This page took 0.124212 seconds and 4 git commands to generate.