2010-04-04 Stan Shebs <stan@codesourcery.com>
[deliverable/binutils-gdb.git] / gdb / amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "opcode/i386.h"
25 #include "dis-asm.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 #include "features/i386/amd64.c"
46
47 /* Note that the AMD64 architecture was previously known as x86-64.
48 The latter is (forever) engraved into the canonical system name as
49 returned by config.guess, and used as the name for the AMD64 port
50 of GNU/Linux. The BSD's have renamed their ports to amd64; they
51 don't like to shout. For GDB we prefer the amd64_-prefix over the
52 x86_64_-prefix since it's so much easier to type. */
53
54 /* Register information. */
55
/* Names of the AMD64 raw registers, in GDB register-number order.
   The ordering must stay in sync with the AMD64_*_REGNUM constants
   in amd64-tdep.h (see the positional comments below).  */
static const char *amd64_register_names[] = 
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
73
74 /* Total number of registers. */
75 #define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
76
77 /* The registers used to pass integer arguments during a function call. */
78 static int amd64_dummy_call_integer_regs[] =
79 {
80 AMD64_RDI_REGNUM, /* %rdi */
81 AMD64_RSI_REGNUM, /* %rsi */
82 AMD64_RDX_REGNUM, /* %rdx */
83 AMD64_RCX_REGNUM, /* %rcx */
84 8, /* %r8 */
85 9 /* %r9 */
86 };
87
88 /* DWARF Register Number Mapping as defined in the System V psABI,
89 section 3.6. */
90
91 static int amd64_dwarf_regmap[] =
92 {
93 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
94 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
95 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
96 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
97
98 /* Frame Pointer Register RBP. */
99 AMD64_RBP_REGNUM,
100
101 /* Stack Pointer Register RSP. */
102 AMD64_RSP_REGNUM,
103
104 /* Extended Integer Registers 8 - 15. */
105 8, 9, 10, 11, 12, 13, 14, 15,
106
107 /* Return Address RA. Mapped to RIP. */
108 AMD64_RIP_REGNUM,
109
110 /* SSE Registers 0 - 7. */
111 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
112 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
113 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
114 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
115
116 /* Extended SSE Registers 8 - 15. */
117 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
118 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
119 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
120 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
121
122 /* Floating Point Registers 0-7. */
123 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
124 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
125 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
126 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
127
128 /* Control and Status Flags Register. */
129 AMD64_EFLAGS_REGNUM,
130
131 /* Selector Registers. */
132 AMD64_ES_REGNUM,
133 AMD64_CS_REGNUM,
134 AMD64_SS_REGNUM,
135 AMD64_DS_REGNUM,
136 AMD64_FS_REGNUM,
137 AMD64_GS_REGNUM,
138 -1,
139 -1,
140
141 /* Segment Base Address Registers. */
142 -1,
143 -1,
144 -1,
145 -1,
146
147 /* Special Selector Registers. */
148 -1,
149 -1,
150
151 /* Floating Point Control Registers. */
152 AMD64_MXCSR_REGNUM,
153 AMD64_FCTRL_REGNUM,
154 AMD64_FSTAT_REGNUM
155 };
156
157 static const int amd64_dwarf_regmap_len =
158 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
159
160 /* Convert DWARF register number REG to the appropriate register
161 number used by GDB. */
162
163 static int
164 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
165 {
166 int regnum = -1;
167
168 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
169 regnum = amd64_dwarf_regmap[reg];
170
171 if (regnum == -1)
172 warning (_("Unmapped DWARF Register #%d encountered."), reg);
173
174 return regnum;
175 }
176
177 /* Map architectural register numbers to gdb register numbers. */
178
/* Map from architectural register number (the encoding used in
   instructions, REX-extended: %rax = 0 ... %r15 = 15) to GDB
   register number.  */
static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};
198
199 static const int amd64_arch_regmap_len =
200 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
201
/* Convert architectural register number REG to the appropriate register
   number used by GDB.  REG must be in [0, 15]; the caller is expected
   to have validated it (we assert rather than warn, unlike the DWARF
   mapping above, because REG comes from decoded instruction bytes).  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
212
/* Register names for byte pseudo-registers.  The first 16 entries are
   the low bytes of the 16 general-purpose registers; the final four
   (ah..dh) are the high bytes of %rax..%rdx and get special handling
   in amd64_pseudo_register_read/write below.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16
224
/* Register names for word (16-bit) pseudo-registers.
   NOTE(review): the slot that would hold "sp" is an empty string,
   presumably to suppress that pseudo-register's name — confirm this
   is deliberate (the dword table below does include "esp").  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "", 
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
232
/* Register names for dword (32-bit) pseudo-registers, the low halves
   of the 16 general-purpose registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
};
240
241 /* Return the name of register REGNUM. */
242
243 static const char *
244 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
245 {
246 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
247 if (i386_byte_regnum_p (gdbarch, regnum))
248 return amd64_byte_names[regnum - tdep->al_regnum];
249 else if (i386_word_regnum_p (gdbarch, regnum))
250 return amd64_word_names[regnum - tdep->ax_regnum];
251 else if (i386_dword_regnum_p (gdbarch, regnum))
252 return amd64_dword_names[regnum - tdep->eax_regnum];
253 else
254 return i386_pseudo_register_name (gdbarch, regnum);
255 }
256
257 static void
258 amd64_pseudo_register_read (struct gdbarch *gdbarch,
259 struct regcache *regcache,
260 int regnum, gdb_byte *buf)
261 {
262 gdb_byte raw_buf[MAX_REGISTER_SIZE];
263 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
264
265 if (i386_byte_regnum_p (gdbarch, regnum))
266 {
267 int gpnum = regnum - tdep->al_regnum;
268
269 /* Extract (always little endian). */
270 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
271 {
272 /* Special handling for AH, BH, CH, DH. */
273 regcache_raw_read (regcache,
274 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
275 memcpy (buf, raw_buf + 1, 1);
276 }
277 else
278 {
279 regcache_raw_read (regcache, gpnum, raw_buf);
280 memcpy (buf, raw_buf, 1);
281 }
282 }
283 else if (i386_dword_regnum_p (gdbarch, regnum))
284 {
285 int gpnum = regnum - tdep->eax_regnum;
286 /* Extract (always little endian). */
287 regcache_raw_read (regcache, gpnum, raw_buf);
288 memcpy (buf, raw_buf, 4);
289 }
290 else
291 i386_pseudo_register_read (gdbarch, regcache, regnum, buf);
292 }
293
294 static void
295 amd64_pseudo_register_write (struct gdbarch *gdbarch,
296 struct regcache *regcache,
297 int regnum, const gdb_byte *buf)
298 {
299 gdb_byte raw_buf[MAX_REGISTER_SIZE];
300 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
301
302 if (i386_byte_regnum_p (gdbarch, regnum))
303 {
304 int gpnum = regnum - tdep->al_regnum;
305
306 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
307 {
308 /* Read ... AH, BH, CH, DH. */
309 regcache_raw_read (regcache,
310 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
311 /* ... Modify ... (always little endian). */
312 memcpy (raw_buf + 1, buf, 1);
313 /* ... Write. */
314 regcache_raw_write (regcache,
315 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
316 }
317 else
318 {
319 /* Read ... */
320 regcache_raw_read (regcache, gpnum, raw_buf);
321 /* ... Modify ... (always little endian). */
322 memcpy (raw_buf, buf, 1);
323 /* ... Write. */
324 regcache_raw_write (regcache, gpnum, raw_buf);
325 }
326 }
327 else if (i386_dword_regnum_p (gdbarch, regnum))
328 {
329 int gpnum = regnum - tdep->eax_regnum;
330
331 /* Read ... */
332 regcache_raw_read (regcache, gpnum, raw_buf);
333 /* ... Modify ... (always little endian). */
334 memcpy (raw_buf, buf, 4);
335 /* ... Write. */
336 regcache_raw_write (regcache, gpnum, raw_buf);
337 }
338 else
339 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
340 }
341
342 \f
343
344 /* Return the union class of CLASS1 and CLASS2. See the psABI for
345 details. */
346
347 static enum amd64_reg_class
348 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
349 {
350 /* Rule (a): If both classes are equal, this is the resulting class. */
351 if (class1 == class2)
352 return class1;
353
354 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
355 is the other class. */
356 if (class1 == AMD64_NO_CLASS)
357 return class2;
358 if (class2 == AMD64_NO_CLASS)
359 return class1;
360
361 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
362 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
363 return AMD64_MEMORY;
364
365 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
366 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
367 return AMD64_INTEGER;
368
369 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
370 MEMORY is used as class. */
371 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
372 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
373 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
374 return AMD64_MEMORY;
375
376 /* Rule (f): Otherwise class SSE is used. */
377 return AMD64_SSE;
378 }
379
380 /* Return non-zero if TYPE is a non-POD structure or union type. */
381
382 static int
383 amd64_non_pod_p (struct type *type)
384 {
385 /* ??? A class with a base class certainly isn't POD, but does this
386 catch all non-POD structure types? */
387 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
388 return 1;
389
390 return 0;
391 }
392
/* Classify TYPE according to the psABI rules for aggregate (structure
   and array) and union types, and store the per-eightbyte result in
   CLASS[0] (bytes 0-7) and CLASS[1] (bytes 8-15).  The numbered
   comments track the steps of the classification algorithm in the
   System V psABI, section 3.2.3.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type, so classifying the
	 element type once suffices.  */
      amd64_classify (subtype, class);
      /* An array longer than one eightbyte whose second eightbyte was
	 left unclassified inherits the class of the first.  */
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Eightbyte in which the field starts (0 or 1).  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  /* Eightbyte in which the field ends.  */
	  int endpos;

	  /* A zero bitsize means the field is not a bit-field; use the
	     full size of its type.  */
	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  /* Any part of the field occupying the second eightbyte also
	     merges into class[1].  */
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
498
/* Classify TYPE per the System V psABI classification algorithm, and
   store the result in CLASS: CLASS[0] for the first eightbyte,
   CLASS[1] for the second.  Types that match none of the cases below
   (e.g. functions) are left as NO_CLASS/NO_CLASS.  */

void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates (arrays, structures and unions) are classified
     field-by-field; see amd64_classify_aggregate.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
546
/* Implement the "return_value" gdbarch method for AMD64.

   TYPE is the value's type; exactly one of READBUF (fetch the return
   value from the inferior's registers) or WRITEBUF (store a return
   value into them) is non-NULL.  Returns the return-value convention
   the psABI prescribes for TYPE.  The numbered comments follow the
   psABI's return-value algorithm.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  /* Registers used for INTEGER-class eightbytes, in order.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  /* Registers used for SSE-class eightbytes, in order.  */
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));
  gdb_assert (tdep->classify);

  /* 1. Classify the return type with the classification algorithm.  */
  tdep->classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function. In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Transfer each eightbyte to/from the register the psABI assigns
     to its class.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  Only the 2-byte
             exponent remains to transfer, at offset 8 of the 80-bit
             register.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
657 \f
658
/* Push the NARGS arguments in ARGS onto the stack / into registers for
   a dummy call, following the psABI parameter-passing rules.  SP is
   the current stack pointer; STRUCT_RETURN is non-zero when a hidden
   first argument (the struct-return address, written elsewhere)
   occupies the first integer register.  Returns the adjusted,
   16-byte-aligned stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  /* Integer argument registers; taken from TDEP so OS variants (e.g.
     Windows x64) can supply their own sequence.  */
  int *integer_regs = tdep->call_dummy_integer_regs;
  int num_integer_regs = tdep->call_dummy_num_integer_regs;

  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  /* An array that mirrors the stack_args array.  For all arguments
     that are passed by MEMORY, if that argument's address also needs
     to be stored in a register, the ARG_ADDR_REGNO array will contain
     that register number (or a negative value otherwise).  */
  int *arg_addr_regno = alloca (nargs * sizeof (int));
  int num_stack_args = 0;	/* Number of entries used in stack_args.  */
  int num_elements = 0;		/* Total stack eightbytes needed.  */
  int element = 0;		/* Current stack eightbyte index.  */
  int integer_reg = 0;		/* Next free integer register.  */
  int sse_reg = 0;		/* Next free SSE register.  */
  int i;

  gdb_assert (tdep->classify);

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  /* First pass: assign register-class arguments to registers, and
     collect the rest in stack_args for the second pass.  */
  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      tdep->classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > num_integer_regs
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args] = args[i];
	  /* If this is an AMD64_MEMORY argument whose address must also
	     be passed in one of the integer registers, reserve that
	     register and associate this value to that register so that
	     we can store the argument address as soon as we know it.  */
	  if (class[0] == AMD64_MEMORY
	      && tdep->memory_args_by_pointer
	      && integer_reg < tdep->call_dummy_num_integer_regs)
	    arg_addr_regno[num_stack_args] =
	      tdep->call_dummy_integer_regs[integer_reg++];
	  else
	    arg_addr_regno[num_stack_args] = -1;
	  num_stack_args++;
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  /* Copy each eightbyte of the value into the register the
	     psABI assigns to its class, zero-padding the last one.  */
	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regs[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* SSEUP goes in the upper half of the last SSE
		     register used.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);
      CORE_ADDR arg_addr = sp + element * 8;

      write_memory (arg_addr, valbuf, len);
      if (arg_addr_regno[i] >= 0)
        {
          /* We also need to store the address of that argument in
             the given register.  */
          gdb_byte buf[8];
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

          store_unsigned_integer (buf, 8, byte_order, arg_addr);
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
        }
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
816
817 static CORE_ADDR
818 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
819 struct regcache *regcache, CORE_ADDR bp_addr,
820 int nargs, struct value **args, CORE_ADDR sp,
821 int struct_return, CORE_ADDR struct_addr)
822 {
823 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
824 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
825 gdb_byte buf[8];
826
827 /* Pass arguments. */
828 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
829
830 /* Pass "hidden" argument". */
831 if (struct_return)
832 {
833 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
834 /* The "hidden" argument is passed throught the first argument
835 register. */
836 const int arg_regnum = tdep->call_dummy_integer_regs[0];
837
838 store_unsigned_integer (buf, 8, byte_order, struct_addr);
839 regcache_cooked_write (regcache, arg_regnum, buf);
840 }
841
842 /* Reserve some memory on the stack for the integer-parameter registers,
843 if required by the ABI. */
844 if (tdep->integer_param_regs_saved_in_caller_frame)
845 sp -= tdep->call_dummy_num_integer_regs * 8;
846
847 /* Store return address. */
848 sp -= 8;
849 store_unsigned_integer (buf, 8, byte_order, bp_addr);
850 write_memory (sp, buf, 8);
851
852 /* Finally, update the stack pointer... */
853 store_unsigned_integer (buf, 8, byte_order, sp);
854 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
855
856 /* ...and fake a frame pointer. */
857 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
858
859 return sp + 16;
860 }
861 \f
862 /* Displaced instruction handling. */
863
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are byte offsets into RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned by this struct; points into the
     buffer the instruction was decoded from.  */
  gdb_byte *raw_insn;
};
881
/* Per-instruction state for displaced stepping on AMD64.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip:
     TMP_USED is non-zero when a substitution was made, TMP_REGNO is the
     register chosen, and TMP_SAVE is its original value to restore.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
899
900 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
901 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
902 at which point delete these in favor of libopcodes' versions). */
903
/* For each one-byte opcode value, non-zero if that instruction has a
   ModRM byte.  Indexed by the opcode byte itself (row = high nibble,
   column = low nibble).  */
static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
926
/* For each two-byte (0x0f-prefixed) opcode, non-zero if that
   instruction has a ModRM byte.  Indexed by the second opcode byte.  */
static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};
949
950 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
951
/* Return non-zero if PFX is a REX instruction prefix, as determined
   by the REX_PREFIX_P macro from opcode/i386.h.  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
957
958 /* Skip the legacy instruction prefixes in INSN.
959 We assume INSN is properly sentineled so we don't have to worry
960 about falling off the end of the buffer. */
961
962 static gdb_byte *
963 amd64_skip_prefixes (gdb_byte *insn)
964 {
965 while (1)
966 {
967 switch (*insn)
968 {
969 case DATA_PREFIX_OPCODE:
970 case ADDR_PREFIX_OPCODE:
971 case CS_PREFIX_OPCODE:
972 case DS_PREFIX_OPCODE:
973 case ES_PREFIX_OPCODE:
974 case FS_PREFIX_OPCODE:
975 case GS_PREFIX_OPCODE:
976 case SS_PREFIX_OPCODE:
977 case LOCK_PREFIX_OPCODE:
978 case REPE_PREFIX_OPCODE:
979 case REPNE_PREFIX_OPCODE:
980 ++insn;
981 continue;
982 default:
983 break;
984 }
985 break;
986 }
987
988 return insn;
989 }
990
/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  STREAM and FORMAT are ignored;
   returning 0 reports zero characters "printed".  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}
1000
/* Initialize DI, a struct disassemble_info, for amd64_insn_length:
   disassemble the MAX_LEN-byte buffer INSN (nominally at address ADDR)
   while discarding all output via amd64_insn_length_fprintf.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
			    struct disassemble_info *di,
			    const gdb_byte *insn, int max_len,
			    CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  /* Arch/endianness come from GDBARCH so the disassembler decodes the
     bytes the same way the target would.  */
  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}
1025
1026 /* Return the length in bytes of INSN.
1027 MAX_LEN is the size of the buffer containing INSN.
1028 libopcodes currently doesn't export a utility to compute the
1029 instruction length, so use the disassembler until then. */
1030
1031 static int
1032 amd64_insn_length (struct gdbarch *gdbarch,
1033 const gdb_byte *insn, int max_len, CORE_ADDR addr)
1034 {
1035 struct disassemble_info di;
1036
1037 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
1038
1039 return gdbarch_print_insn (gdbarch, addr, &di);
1040 }
1041
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* A SIB byte follows the ModRM byte when mod != 3 and r/m == 4.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << index;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Only the low 3 bits of each reg number were recorded, so at most
     the low 8 bits of the mask can be set, and at least one of the 8
     registers must still be free.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
1115
/* Extract the details of INSN that we need: where (if anywhere) its
   REX prefix, opcode and ModRM byte live, and how long the opcode is.
   Offsets in DETAILS are relative to the start of INSN; -1 means the
   element is not present.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.
	 These second bytes are themselves further escapes
	 (e.g. 0x0f 0x38 xx, 0x0f 0x3a xx).  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1179
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.

   FROM is the original address of the insn; the copy's address TO is
   currently unused here.  The chosen scratch register's old value is
   recorded in DSC so amd64_displaced_step_fixup can restore it.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the temp register's original value so the fixup phase can
     restore it after the single-step.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1238
1239 static void
1240 fixup_displaced_copy (struct gdbarch *gdbarch,
1241 struct displaced_step_closure *dsc,
1242 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1243 {
1244 const struct amd64_insn *details = &dsc->insn_details;
1245
1246 if (details->modrm_offset != -1)
1247 {
1248 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1249
1250 if ((modrm & 0xc7) == 0x05)
1251 {
1252 /* The insn uses rip-relative addressing.
1253 Deal with it. */
1254 fixup_riprel (gdbarch, dsc, from, to, regs);
1255 }
1256 }
1257 }
1258
/* Copy the insn at FROM into the displaced-stepping scratch area at TO,
   patching it as needed (rip-relative operands, trailing nop after a
   syscall), and return a closure describing what was done so the fixup
   phase can undo/adjust state afterwards.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1310
1311 static int
1312 amd64_absolute_jmp_p (const struct amd64_insn *details)
1313 {
1314 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1315
1316 if (insn[0] == 0xff)
1317 {
1318 /* jump near, absolute indirect (/4) */
1319 if ((insn[1] & 0x38) == 0x20)
1320 return 1;
1321
1322 /* jump far, absolute indirect (/5) */
1323 if ((insn[1] & 0x38) == 0x28)
1324 return 1;
1325 }
1326
1327 return 0;
1328 }
1329
1330 static int
1331 amd64_absolute_call_p (const struct amd64_insn *details)
1332 {
1333 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1334
1335 if (insn[0] == 0xff)
1336 {
1337 /* Call near, absolute indirect (/2) */
1338 if ((insn[1] & 0x38) == 0x10)
1339 return 1;
1340
1341 /* Call far, absolute indirect (/3) */
1342 if ((insn[1] & 0x38) == 0x18)
1343 return 1;
1344 }
1345
1346 return 0;
1347 }
1348
1349 static int
1350 amd64_ret_p (const struct amd64_insn *details)
1351 {
1352 /* NOTE: gcc can emit "repz ; ret". */
1353 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1354
1355 switch (insn[0])
1356 {
1357 case 0xc2: /* ret near, pop N bytes */
1358 case 0xc3: /* ret near */
1359 case 0xca: /* ret far, pop N bytes */
1360 case 0xcb: /* ret far */
1361 case 0xcf: /* iret */
1362 return 1;
1363
1364 default:
1365 return 0;
1366 }
1367 }
1368
1369 static int
1370 amd64_call_p (const struct amd64_insn *details)
1371 {
1372 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1373
1374 if (amd64_absolute_call_p (details))
1375 return 1;
1376
1377 /* call near, relative */
1378 if (insn[0] == 0xe8)
1379 return 1;
1380
1381 return 0;
1382 }
1383
1384 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1385 length in bytes. Otherwise, return zero. */
1386
1387 static int
1388 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1389 {
1390 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1391
1392 if (insn[0] == 0x0f && insn[1] == 0x05)
1393 {
1394 *lengthp = 2;
1395 return 1;
1396 }
1397
1398 return 0;
1399 }
1400
1401 /* Fix up the state of registers and memory after having single-stepped
1402 a displaced instruction. */
1403
1404 void
1405 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1406 struct displaced_step_closure *dsc,
1407 CORE_ADDR from, CORE_ADDR to,
1408 struct regcache *regs)
1409 {
1410 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1411 /* The offset we applied to the instruction's address. */
1412 ULONGEST insn_offset = to - from;
1413 gdb_byte *insn = dsc->insn_buf;
1414 const struct amd64_insn *insn_details = &dsc->insn_details;
1415
1416 if (debug_displaced)
1417 fprintf_unfiltered (gdb_stdlog,
1418 "displaced: fixup (%s, %s), "
1419 "insn = 0x%02x 0x%02x ...\n",
1420 paddress (gdbarch, from), paddress (gdbarch, to),
1421 insn[0], insn[1]);
1422
1423 /* If we used a tmp reg, restore it. */
1424
1425 if (dsc->tmp_used)
1426 {
1427 if (debug_displaced)
1428 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1429 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1430 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1431 }
1432
1433 /* The list of issues to contend with here is taken from
1434 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1435 Yay for Free Software! */
1436
1437 /* Relocate the %rip back to the program's instruction stream,
1438 if necessary. */
1439
1440 /* Except in the case of absolute or indirect jump or call
1441 instructions, or a return instruction, the new rip is relative to
1442 the displaced instruction; make it relative to the original insn.
1443 Well, signal handler returns don't need relocation either, but we use the
1444 value of %rip to recognize those; see below. */
1445 if (! amd64_absolute_jmp_p (insn_details)
1446 && ! amd64_absolute_call_p (insn_details)
1447 && ! amd64_ret_p (insn_details))
1448 {
1449 ULONGEST orig_rip;
1450 int insn_len;
1451
1452 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1453
1454 /* A signal trampoline system call changes the %rip, resuming
1455 execution of the main program after the signal handler has
1456 returned. That makes them like 'return' instructions; we
1457 shouldn't relocate %rip.
1458
1459 But most system calls don't, and we do need to relocate %rip.
1460
1461 Our heuristic for distinguishing these cases: if stepping
1462 over the system call instruction left control directly after
1463 the instruction, the we relocate --- control almost certainly
1464 doesn't belong in the displaced copy. Otherwise, we assume
1465 the instruction has put control where it belongs, and leave
1466 it unrelocated. Goodness help us if there are PC-relative
1467 system calls. */
1468 if (amd64_syscall_p (insn_details, &insn_len)
1469 && orig_rip != to + insn_len
1470 /* GDB can get control back after the insn after the syscall.
1471 Presumably this is a kernel bug.
1472 Fixup ensures its a nop, we add one to the length for it. */
1473 && orig_rip != to + insn_len + 1)
1474 {
1475 if (debug_displaced)
1476 fprintf_unfiltered (gdb_stdlog,
1477 "displaced: syscall changed %%rip; "
1478 "not relocating\n");
1479 }
1480 else
1481 {
1482 ULONGEST rip = orig_rip - insn_offset;
1483
1484 /* If we just stepped over a breakpoint insn, we don't backup
1485 the pc on purpose; this is to match behaviour without
1486 stepping. */
1487
1488 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1489
1490 if (debug_displaced)
1491 fprintf_unfiltered (gdb_stdlog,
1492 "displaced: "
1493 "relocated %%rip from %s to %s\n",
1494 paddress (gdbarch, orig_rip),
1495 paddress (gdbarch, rip));
1496 }
1497 }
1498
1499 /* If the instruction was PUSHFL, then the TF bit will be set in the
1500 pushed value, and should be cleared. We'll leave this for later,
1501 since GDB already messes up the TF flag when stepping over a
1502 pushfl. */
1503
1504 /* If the instruction was a call, the return address now atop the
1505 stack is the address following the copied instruction. We need
1506 to make it the address following the original instruction. */
1507 if (amd64_call_p (insn_details))
1508 {
1509 ULONGEST rsp;
1510 ULONGEST retaddr;
1511 const ULONGEST retaddr_len = 8;
1512
1513 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1514 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1515 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1516 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1517
1518 if (debug_displaced)
1519 fprintf_unfiltered (gdb_stdlog,
1520 "displaced: relocated return addr at %s "
1521 "to %s\n",
1522 paddress (gdbarch, rsp),
1523 paddress (gdbarch, retaddr));
1524 }
1525 }
1526 \f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS

/* Per-frame cache shared by the normal, sigtramp and epilogue
   unwinders, filled in by the prologue analyzer.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Adjustment of %rsp relative to function entry; initialized to -8
     to account for the return address pushed by the caller.  */
  CORE_ADDR sp_offset;
  /* Start address (frame func) of the function.  */
  CORE_ADDR pc;

  /* Saved registers.  Each entry starts as an offset from the frame
     base and is later converted into an absolute address; -1 means
     "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* %rsp value in the calling frame.  */
  CORE_ADDR saved_sp;
  /* Register holding the pre-stack-alignment %rsp (see
     amd64_analyze_stack_align), or -1 if none.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
1545
1546 /* Initialize a frame cache. */
1547
1548 static void
1549 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1550 {
1551 int i;
1552
1553 /* Base address. */
1554 cache->base = 0;
1555 cache->sp_offset = -8;
1556 cache->pc = 0;
1557
1558 /* Saved registers. We initialize these to -1 since zero is a valid
1559 offset (that's where %rbp is supposed to be stored).
1560 The values start out as being offsets, and are later converted to
1561 addresses (at which point -1 is interpreted as an address, still meaning
1562 "invalid"). */
1563 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1564 cache->saved_regs[i] = -1;
1565 cache->saved_sp = 0;
1566 cache->saved_sp_reg = -1;
1567
1568 /* Frameless until proven otherwise. */
1569 cache->frameless_p = 1;
1570 }
1571
1572 /* Allocate and initialize a frame cache. */
1573
1574 static struct amd64_frame_cache *
1575 amd64_alloc_frame_cache (void)
1576 {
1577 struct amd64_frame_cache *cache;
1578
1579 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1580 amd64_init_frame_cache (cache);
1581 return cache;
1582 }
1583
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
     
     	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
     	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;
      
      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "andq" starts: only record saved_sp_reg below
     if execution has actually reached it.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
1738
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* Expected bytes of "movq %rsp, %rbp".  */
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  /* A GCC stack-realignment sequence may precede the push; skip it.  */
  pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_memory_unsigned_integer (pc, 1, byte_order);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
        return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
	return pc + 1;

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}
1791
1792 /* Return PC of first real instruction. */
1793
1794 static CORE_ADDR
1795 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1796 {
1797 struct amd64_frame_cache cache;
1798 CORE_ADDR pc;
1799
1800 amd64_init_frame_cache (&cache);
1801 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1802 &cache);
1803 if (cache.frameless_p)
1804 return start_pc;
1805
1806 return pc;
1807 }
1808 \f
1809
1810 /* Normal frames. */
1811
/* Build (or return the cached) amd64_frame_cache for THIS_FRAME,
   running the prologue analyzer and converting saved-register offsets
   into absolute addresses.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->saved_sp_reg != -1)
    {
      /* Stack pointer has been saved.  */
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
    }

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* We're halfway aligning the stack.  Reconstruct the base
	     from the pre-alignment stack pointer.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A normal frame: the frame pointer %rbp is the base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
1889
1890 static void
1891 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1892 struct frame_id *this_id)
1893 {
1894 struct amd64_frame_cache *cache =
1895 amd64_frame_cache (this_frame, this_cache);
1896
1897 /* This marks the outermost frame. */
1898 if (cache->base == 0)
1899 return;
1900
1901 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1902 }
1903
1904 static struct value *
1905 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1906 int regnum)
1907 {
1908 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1909 struct amd64_frame_cache *cache =
1910 amd64_frame_cache (this_frame, this_cache);
1911
1912 gdb_assert (regnum >= 0);
1913
1914 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1915 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1916
1917 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1918 return frame_unwind_got_memory (this_frame, regnum,
1919 cache->saved_regs[regnum]);
1920
1921 return frame_unwind_got_register (this_frame, regnum, regnum);
1922 }
1923
/* Unwinder for ordinary, prologue-analyzed frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1932 \f
1933
1934 /* Signal trampolines. */
1935
1936 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1937 64-bit variants. This would require using identical frame caches
1938 on both platforms. */
1939
/* Build (or return the cached) frame cache for a signal trampoline
   frame, locating saved registers via the tdep's sigcontext layout.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

  /* Map each register to its slot in the OS-specific signal context;
     sc_reg_offset[i] == -1 means the register isn't saved there.  */
  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}
1969
1970 static void
1971 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1972 void **this_cache, struct frame_id *this_id)
1973 {
1974 struct amd64_frame_cache *cache =
1975 amd64_sigtramp_frame_cache (this_frame, this_cache);
1976
1977 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1978 }
1979
/* Fetch the caller's value of REGNUM for a signal trampoline frame.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Populate the cache, then defer to the normal-frame logic, which
     consults the saved-register table we just filled in.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1989
1990 static int
1991 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1992 struct frame_info *this_frame,
1993 void **this_cache)
1994 {
1995 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1996
1997 /* We shouldn't even bother if we don't have a sigcontext_addr
1998 handler. */
1999 if (tdep->sigcontext_addr == NULL)
2000 return 0;
2001
2002 if (tdep->sigtramp_p != NULL)
2003 {
2004 if (tdep->sigtramp_p (this_frame))
2005 return 1;
2006 }
2007
2008 if (tdep->sigtramp_start != 0)
2009 {
2010 CORE_ADDR pc = get_frame_pc (this_frame);
2011
2012 gdb_assert (tdep->sigtramp_end != 0);
2013 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2014 return 1;
2015 }
2016
2017 return 0;
2018 }
2019
/* Unwinder for signal trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
2028 \f
2029
2030 static CORE_ADDR
2031 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2032 {
2033 struct amd64_frame_cache *cache =
2034 amd64_frame_cache (this_frame, this_cache);
2035
2036 return cache->base;
2037 }
2038
/* Frame base handler: the frame base, locals base and args base all
   coincide with the cached base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
2046
2047 /* Normal frames, but in a function epilogue. */
2048
2049 /* The epilogue is defined here as the 'ret' instruction, which will
2050 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2051 the function's stack frame. */
2052
2053 static int
2054 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2055 {
2056 gdb_byte insn;
2057
2058 if (target_read_memory (pc, &insn, 1))
2059 return 0; /* Can't read memory at pc. */
2060
2061 if (insn != 0xc3) /* 'ret' instruction. */
2062 return 0;
2063
2064 return 1;
2065 }
2066
/* Sniffer for the epilogue unwinder.  Only the innermost frame
   (level 0) can legitimately be stopped inside an epilogue.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2078
/* Build (or return the already-built) frame cache for a frame that is
   stopped at its epilogue ('ret').  At that point %rsp points at the
   return address, so the frame layout can be derived directly from
   %rsp without any prologue analysis.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
					  byte_order) + cache->sp_offset;

  /* Cache pc will be the frame func.  */
  cache->pc = get_frame_pc (this_frame);

  /* The saved %rsp will be at cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}
2109
2110 static void
2111 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2112 void **this_cache,
2113 struct frame_id *this_id)
2114 {
2115 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2116 this_cache);
2117
2118 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2119 }
2120
/* Unwinder for normal frames caught in their epilogue; registers are
   recovered with the regular amd64_frame_prev_register.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,				/* unwind_data -- none needed.  */
  amd64_epilogue_frame_sniffer
};
2129
2130 static struct frame_id
2131 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2132 {
2133 CORE_ADDR fp;
2134
2135 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2136
2137 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2138 }
2139
2140 /* 16 byte align the SP per frame requirements. */
2141
2142 static CORE_ADDR
2143 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2144 {
2145 return sp & -(CORE_ADDR)16;
2146 }
2147 \f
2148
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.
   FPREGS must be a full FXSAVE area (see tdep->sizeof_fpregset).  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}
2162
/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  FPREGS must be a full FXSAVE area (see
   tdep->sizeof_fpregset).  */

static void
amd64_collect_fpregset (const struct regset *regset,
			const struct regcache *regcache,
			int regnum, void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}
2178
2179 /* Return the appropriate register set for the core section identified
2180 by SECT_NAME and SECT_SIZE. */
2181
2182 static const struct regset *
2183 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2184 const char *sect_name, size_t sect_size)
2185 {
2186 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2187
2188 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2189 {
2190 if (tdep->fpregset == NULL)
2191 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2192 amd64_collect_fpregset);
2193
2194 return tdep->fpregset;
2195 }
2196
2197 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2198 }
2199 \f
2200
2201 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2202 %rdi. We expect its value to be a pointer to the jmp_buf structure
2203 from which we extract the address that we will land at. This
2204 address is copied into PC. This routine returns non-zero on
2205 success. */
2206
2207 static int
2208 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2209 {
2210 gdb_byte buf[8];
2211 CORE_ADDR jb_addr;
2212 struct gdbarch *gdbarch = get_frame_arch (frame);
2213 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2214 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2215
2216 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2217 longjmp will land. */
2218 if (jb_pc_offset == -1)
2219 return 0;
2220
2221 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2222 jb_addr= extract_typed_address
2223 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2224 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2225 return 0;
2226
2227 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2228
2229 return 1;
2230 }
2231
/* Map the register indices used by the process record/replay code to
   the GDB register numbers for this architecture.  */

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
2241
/* Set up the common AMD64 (a.k.a. x86-64) parts of the gdbarch
   vector: target description, register set, LP64 type sizes, the
   System V psABI calling convention, and the frame unwinders defined
   above.  OS-specific tdep code calls this and then applies its own
   tweaks.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* Fall back on the built-in target description if the target
     doesn't provide one of its own.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Pseudo registers: the 8-, 16- and 32-bit views of the general
     purpose registers.  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read (gdbarch,
				    amd64_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
  tdep->call_dummy_num_integer_regs =
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
  tdep->classify = amd64_classify;

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
}
2344
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the built-in AMD64 target
   description.  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
}
2353 \f
2354
2355 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2356 sense that the instruction pointer and data pointer are simply
2357 64-bit offsets into the code segment and the data segment instead
2358 of a selector offset pair. The functions below store the upper 32
2359 bits of these pointers (instead of just the 16-bits of the segment
2360 selector). */
2361
2362 /* Fill register REGNUM in REGCACHE with the appropriate
2363 floating-point or SSE register value from *FXSAVE. If REGNUM is
2364 -1, do this for all registers. This function masks off any of the
2365 reserved bits in *FXSAVE. */
2366
2367 void
2368 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2369 const void *fxsave)
2370 {
2371 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2372 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2373
2374 i387_supply_fxsave (regcache, regnum, fxsave);
2375
2376 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2377 {
2378 const gdb_byte *regs = fxsave;
2379
2380 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2381 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2382 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2383 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2384 }
2385 }
2386
2387 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2388 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2389 all registers. This function doesn't touch any of the reserved
2390 bits in *FXSAVE. */
2391
2392 void
2393 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2394 void *fxsave)
2395 {
2396 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2397 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2398 gdb_byte *regs = fxsave;
2399
2400 i387_collect_fxsave (regcache, regnum, fxsave);
2401
2402 if (gdbarch_ptr_bit (gdbarch) == 64)
2403 {
2404 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2405 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2406 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2407 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2408 }
2409 }
This page took 0.082406 seconds and 4 git commands to generate.