* x86-64-tdep.c (amd64_classify_aggregate): Ignore static fields.
[deliverable/binutils-gdb.git] / gdb / x86-64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Jiri Smid, SuSE Labs.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23 #include "defs.h"
24 #include "arch-utils.h"
25 #include "block.h"
26 #include "dummy-frame.h"
27 #include "frame.h"
28 #include "frame-base.h"
29 #include "frame-unwind.h"
30 #include "inferior.h"
31 #include "gdbcmd.h"
32 #include "gdbcore.h"
33 #include "objfiles.h"
34 #include "regcache.h"
35 #include "regset.h"
36 #include "symfile.h"
37
38 #include "gdb_assert.h"
39
40 #include "x86-64-tdep.h"
41 #include "i387-tdep.h"
42
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
49
/* Register information.  */

/* One entry per raw register: the name GDB presents to the user and
   the builtin GDB type used to display the register's contents.  The
   type is held by pointer, presumably because the builtin_type_*
   globals are initialized at runtime -- TODO confirm.  */

struct amd64_register_info
{
  char *name;			/* User-visible register name.  */
  struct type **type;		/* Pointer to the register's GDB type.  */
};
57
/* Table mapping raw register numbers (the array index) to name and
   type.  The layout fixes the GDB register numbering for AMD64:
   general-purpose registers first, then %rip and the status/segment
   registers, the x87 stack and control registers, and finally the
   SSE registers and %mxcsr.  */

static struct amd64_register_info amd64_register_info[] =
{
  { "rax", &builtin_type_int64 },
  { "rbx", &builtin_type_int64 },
  { "rcx", &builtin_type_int64 },
  { "rdx", &builtin_type_int64 },
  { "rsi", &builtin_type_int64 },
  { "rdi", &builtin_type_int64 },
  { "rbp", &builtin_type_void_data_ptr },
  { "rsp", &builtin_type_void_data_ptr },

  /* %r8 is indeed register number 8.  */
  { "r8", &builtin_type_int64 },
  { "r9", &builtin_type_int64 },
  { "r10", &builtin_type_int64 },
  { "r11", &builtin_type_int64 },
  { "r12", &builtin_type_int64 },
  { "r13", &builtin_type_int64 },
  { "r14", &builtin_type_int64 },
  { "r15", &builtin_type_int64 },
  { "rip", &builtin_type_void_func_ptr },
  { "eflags", &builtin_type_int32 },
  { "cs", &builtin_type_int32 },
  { "ss", &builtin_type_int32 },
  { "ds", &builtin_type_int32 },
  { "es", &builtin_type_int32 },
  { "fs", &builtin_type_int32 },
  { "gs", &builtin_type_int32 },

  /* %st0 is register number 24.  */
  { "st0", &builtin_type_i387_ext },
  { "st1", &builtin_type_i387_ext },
  { "st2", &builtin_type_i387_ext },
  { "st3", &builtin_type_i387_ext },
  { "st4", &builtin_type_i387_ext },
  { "st5", &builtin_type_i387_ext },
  { "st6", &builtin_type_i387_ext },
  { "st7", &builtin_type_i387_ext },
  { "fctrl", &builtin_type_int32 },
  { "fstat", &builtin_type_int32 },
  { "ftag", &builtin_type_int32 },
  { "fiseg", &builtin_type_int32 },
  { "fioff", &builtin_type_int32 },
  { "foseg", &builtin_type_int32 },
  { "fooff", &builtin_type_int32 },
  { "fop", &builtin_type_int32 },

  /* %xmm0 is register number 40.  */
  { "xmm0", &builtin_type_v4sf },
  { "xmm1", &builtin_type_v4sf },
  { "xmm2", &builtin_type_v4sf },
  { "xmm3", &builtin_type_v4sf },
  { "xmm4", &builtin_type_v4sf },
  { "xmm5", &builtin_type_v4sf },
  { "xmm6", &builtin_type_v4sf },
  { "xmm7", &builtin_type_v4sf },
  { "xmm8", &builtin_type_v4sf },
  { "xmm9", &builtin_type_v4sf },
  { "xmm10", &builtin_type_v4sf },
  { "xmm11", &builtin_type_v4sf },
  { "xmm12", &builtin_type_v4sf },
  { "xmm13", &builtin_type_v4sf },
  { "xmm14", &builtin_type_v4sf },
  { "xmm15", &builtin_type_v4sf },
  { "mxcsr", &builtin_type_int32 }
};

/* Total number of registers.  */
#define AMD64_NUM_REGS \
  (sizeof (amd64_register_info) / sizeof (amd64_register_info[0]))
128
129 /* Return the name of register REGNUM. */
130
131 static const char *
132 amd64_register_name (int regnum)
133 {
134 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
135 return amd64_register_info[regnum].name;
136
137 return NULL;
138 }
139
140 /* Return the GDB type object for the "standard" data type of data in
141 register REGNUM. */
142
143 static struct type *
144 amd64_register_type (struct gdbarch *gdbarch, int regnum)
145 {
146 gdb_assert (regnum >= 0 && regnum < AMD64_NUM_REGS);
147
148 return *amd64_register_info[regnum].type;
149 }
150
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  Indexed by DWARF register number; each entry is the
   corresponding GDB register number.  Entries written as bare
   integers are GDB register numbers that have no X86_64_*_REGNUM
   constant used here: per the register table above, 2 is %rcx, 1 is
   %rbx, 4 is %rsi, and 8-15 are %r8-%r15.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  X86_64_RAX_REGNUM, X86_64_RDX_REGNUM, 2, 1,
  4, X86_64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  X86_64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  X86_64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  X86_64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
  X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
  X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
  X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  X86_64_XMM0_REGNUM + 8, X86_64_XMM0_REGNUM + 9,
  X86_64_XMM0_REGNUM + 10, X86_64_XMM0_REGNUM + 11,
  X86_64_XMM0_REGNUM + 12, X86_64_XMM0_REGNUM + 13,
  X86_64_XMM0_REGNUM + 14, X86_64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  X86_64_ST0_REGNUM + 0, X86_64_ST0_REGNUM + 1,
  X86_64_ST0_REGNUM + 2, X86_64_ST0_REGNUM + 3,
  X86_64_ST0_REGNUM + 4, X86_64_ST0_REGNUM + 5,
  X86_64_ST0_REGNUM + 6, X86_64_ST0_REGNUM + 7
};

/* Number of entries in the DWARF register map above.  */

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
193
194 /* Convert DWARF register number REG to the appropriate register
195 number used by GDB. */
196
197 static int
198 amd64_dwarf_reg_to_regnum (int reg)
199 {
200 int regnum = -1;
201
202 if (reg >= 0 || reg < amd64_dwarf_regmap_len)
203 regnum = amd64_dwarf_regmap[reg];
204
205 if (regnum == -1)
206 warning ("Unmapped DWARF Register #%d encountered\n", reg);
207
208 return regnum;
209 }
210
/* Return nonzero if a value of type TYPE stored in register REGNUM
   needs any special handling.  Only the i387 floating-point registers
   do (they hold values in the 80-bit i387 extended format; see the
   register table above); TYPE is not consulted.  */

static int
amd64_convert_register_p (int regnum, struct type *type)
{
  return i386_fp_regnum_p (regnum);
}
219 \f
220
/* Register classes as defined in the psABI.  Each "eightbyte" of an
   argument or return value is assigned one of these classes, which
   determines where it is passed.  */

enum amd64_reg_class
{
  AMD64_INTEGER,		/* General-purpose registers.  */
  AMD64_SSE,			/* SSE (%xmm) registers.  */
  AMD64_SSEUP,			/* Upper half of the preceding SSE register.  */
  AMD64_X87,			/* x87 stack (mantissa of long double).  */
  AMD64_X87UP,			/* Returned with the preceding X87 value.  */
  AMD64_COMPLEX_X87,		/* psABI class for complex long double.  */
  AMD64_NO_CLASS,		/* Padding/empty; merges with anything.  */
  AMD64_MEMORY			/* Passed in memory.  */
};
234
235 /* Return the union class of CLASS1 and CLASS2. See the psABI for
236 details. */
237
238 static enum amd64_reg_class
239 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
240 {
241 /* Rule (a): If both classes are equal, this is the resulting class. */
242 if (class1 == class2)
243 return class1;
244
245 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
246 is the other class. */
247 if (class1 == AMD64_NO_CLASS)
248 return class2;
249 if (class2 == AMD64_NO_CLASS)
250 return class1;
251
252 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
253 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
254 return AMD64_MEMORY;
255
256 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
257 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
258 return AMD64_INTEGER;
259
260 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
261 MEMORY is used as class. */
262 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
263 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
264 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
265 return AMD64_MEMORY;
266
267 /* Rule (f): Otherwise class SSE is used. */
268 return AMD64_SSE;
269 }
270
static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  The
   numbered comments below follow the classification algorithm in the
   System V psABI.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16)
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      /* If the array spills into the second eightbyte and the element
         class left it unset, the second eightbyte has the same class
         as the first.  */
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* POS selects the eightbyte in which this field starts.  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];

	  /* Ignore static fields; they occupy no storage in the
	     object.  */
	  if (TYPE_FIELD_STATIC (type, i))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  /* A field starting in the first eightbyte may extend into
	     the second; merge its high half there too.  */
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
348
349 /* Classify TYPE, and store the result in CLASS. */
350
351 static void
352 amd64_classify (struct type *type, enum amd64_reg_class class[2])
353 {
354 enum type_code code = TYPE_CODE (type);
355 int len = TYPE_LENGTH (type);
356
357 class[0] = class[1] = AMD64_NO_CLASS;
358
359 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
360 long, long long, and pointers are in the INTEGER class. */
361 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
362 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
363 && (len == 1 || len == 2 || len == 4 || len == 8))
364 class[0] = AMD64_INTEGER;
365
366 /* Arguments of types float, double and __m64 are in class SSE. */
367 else if (code == TYPE_CODE_FLT && (len == 4 || len == 8))
368 /* FIXME: __m64 . */
369 class[0] = AMD64_SSE;
370
371 /* Arguments of types __float128 and __m128 are split into two
372 halves. The least significant ones belong to class SSE, the most
373 significant one to class SSEUP. */
374 /* FIXME: __float128, __m128. */
375
376 /* The 64-bit mantissa of arguments of type long double belongs to
377 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
378 class X87UP. */
379 else if (code == TYPE_CODE_FLT && len == 16)
380 /* Class X87 and X87UP. */
381 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
382
383 /* Aggregates. */
384 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
385 || code == TYPE_CODE_UNION)
386 amd64_classify_aggregate (type, class);
387 }
388
/* Implement the gdbarch "return_value" method: fetch (READBUF) or
   store (WRITEBUF) a function return value of TYPE in the registers
   of REGCACHE, following the psABI classification.  At most one of
   READBUF/WRITEBUF may be non-NULL.  Returns
   RETURN_VALUE_STRUCT_CONVENTION when the value is returned in
   memory via a hidden pointer, RETURN_VALUE_REGISTER_CONVENTION
   otherwise.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *type,
		    struct regcache *regcache,
		    void *readbuf, const void *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { X86_64_RAX_REGNUM, X86_64_RDX_REGNUM };
  static int sse_regnum[] = { X86_64_XMM0_REGNUM, X86_64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In
     effect, this address becomes a hidden first argument.  */
  if (class[0] == AMD64_MEMORY)
    return RETURN_VALUE_STRUCT_CONVENTION;

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Walk the (at most two) eightbytes of the value.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = X86_64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent remains to transfer; forcing LEN to 2 also makes
	     this the final iteration.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = X86_64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* Transfer at most one eightbyte per iteration.  */
      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				(char *) readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 (const char *) writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
480 \f
481
/* Push the NARGS arguments ARGS for a function call into registers in
   REGCACHE and onto the stack below SP, following the psABI parameter
   passing rules.  Register contents are written immediately; stack
   arguments are deferred until the final, 16-byte-aligned SP is
   known.  Returns the adjusted stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp)
{
  /* Integer argument registers, in allocation order.  The bare
     numbers are GDB register numbers without symbolic constants here:
     4 is %rsi, 2 is %rcx, 8 and 9 are %r8 and %r9 (see the register
     table above).  */
  static int integer_regnum[] =
  {
    X86_64_RDI_REGNUM, 4,	/* %rdi, %rsi */
    X86_64_RDX_REGNUM, 2,	/* %rdx, %rcx */
    8, 9			/* %r8, %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    X86_64_XMM0_REGNUM + 0, X86_64_XMM1_REGNUM,
    X86_64_XMM0_REGNUM + 2, X86_64_XMM0_REGNUM + 3,
    X86_64_XMM0_REGNUM + 4, X86_64_XMM0_REGNUM + 5,
    X86_64_XMM0_REGNUM + 6, X86_64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;	/* Arguments deferred to the stack.  */
  int num_elements = 0;		/* Eightbytes of stack space needed.  */
  int element = 0;		/* Current eightbyte while writing out.  */
  int integer_reg = 0;		/* Next free integer register.  */
  int sse_reg = 0;		/* Next free SSE register.  */
  int i;

  /* First pass: assign every argument to registers or to the
     stack.  */
  for (i = 0; i < nargs; i++)
    {
      struct type *type = VALUE_TYPE (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  char *valbuf = VALUE_CONTENTS (args[i]);
	  char buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      /* Zero-pad the final, possibly partial, eightbyte.  */
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = VALUE_TYPE (stack_args[i]);
      char *valbuf = VALUE_CONTENTS (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, X86_64_RAX_REGNUM, sse_reg);
  return sp;
}
606
/* Implement the gdbarch "push_dummy_call" method: set up the inferior
   stack and registers in REGCACHE for a call to FUNC_ADDR with the
   NARGS arguments ARGS, arranging for the dummy to return to BP_ADDR.
   If STRUCT_RETURN, STRUCT_ADDR is passed as the hidden first
   argument (in %rdi).  Returns the address used to identify the dummy
   frame.  */

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  char buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, X86_64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, X86_64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  BUF still holds SP, so %rbp gets
     the same value.  */
  regcache_cooked_write (regcache, X86_64_RBP_REGNUM, buf);

  /* NOTE(review): SP + 16 matches the "base + 16" frame ID convention
     used by amd64_frame_this_id/amd64_unwind_dummy_id in this file.  */
  return sp + 16;
}
639 \f
640
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	X86_64_NUM_GREGS

/* Per-frame unwind cache, filled in by amd64_frame_cache and
   amd64_sigtramp_frame_cache.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;		/* Frame base: %rbp, or reconstructed
				   from %rsp for frameless functions.  */
  CORE_ADDR sp_offset;		/* Prologue stack adjustment; added to
				   %rsp to reconstruct the base.  */
  CORE_ADDR pc;			/* Start address of the function.  */

  /* Saved registers.  Entries start out as offsets from BASE and are
     turned into absolute addresses by amd64_frame_cache; -1 means
     "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;		/* Value of %rsp in the calling frame.  */

  /* Do we have a frame?  */
  int frameless_p;
};
658
659 /* Allocate and initialize a frame cache. */
660
661 static struct amd64_frame_cache *
662 amd64_alloc_frame_cache (void)
663 {
664 struct amd64_frame_cache *cache;
665 int i;
666
667 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
668
669 /* Base address. */
670 cache->base = 0;
671 cache->sp_offset = -8;
672 cache->pc = 0;
673
674 /* Saved registers. We initialize these to -1 since zero is a valid
675 offset (that's where %rbp is supposed to be stored). */
676 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
677 cache->saved_regs[i] = -1;
678 cache->saved_sp = 0;
679
680 /* Frameless until proven otherwise. */
681 cache->frameless_p = 1;
682
683 return cache;
684 }
685
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.
   Note: CACHE->frameless_p is only ever cleared here, never set; the
   caller must have initialized it.  */

static CORE_ADDR
amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  /* Opcode bytes of `movq %rsp, %rbp'.  */
  static unsigned char proto[3] = { 0x48, 0x89, 0xe5 };
  unsigned char buf[3];
  unsigned char op;

  if (current_pc <= pc)
    return current_pc;

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence: %rbp is saved at offset 0
         and the stack pointer moved down one slot.  */
      cache->saved_regs[X86_64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)
	return pc + 1;

      /* OK, we actually have a frame.  Stop just past the three-byte
         `movq'.  */
      cache->frameless_p = 0;
      return pc + 4;
    }

  return pc;
}
734
735 /* Return PC of first real instruction. */
736
737 static CORE_ADDR
738 amd64_skip_prologue (CORE_ADDR start_pc)
739 {
740 struct amd64_frame_cache cache;
741 CORE_ADDR pc;
742
743 pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffff, &cache);
744 if (cache.frameless_p)
745 return start_pc;
746
747 return pc;
748 }
749 \f
750
/* Normal frames.  */

/* Build (or return the previously cached) unwind cache for the frame
   above NEXT_FRAME, analyzing the function's prologue to locate the
   frame base and the saved registers.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = frame_func_unwind (next_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame, which means that CACHE->base
         currently holds the frame pointer for our calling frame.  If
         we're at the start of a function, or somewhere half-way its
         prologue, the function's frame probably hasn't been fully
         setup yet.  Try to reconstruct the base address for the stack
         frame by looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */

      frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
    }
  else
    {
      frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  */
  cache->saved_regs[X86_64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
806
807 static void
808 amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
809 struct frame_id *this_id)
810 {
811 struct amd64_frame_cache *cache =
812 amd64_frame_cache (next_frame, this_cache);
813
814 /* This marks the outermost frame. */
815 if (cache->base == 0)
816 return;
817
818 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
819 }
820
/* Implement the "prev_register" unwind method for normal frames:
   describe where register REGNUM of the frame above NEXT_FRAME lives
   (*OPTIMIZEDP, *LVALP, *ADDRP, *REALNUMP) and, if VALUEP is
   non-NULL, fetch its value.  */

static void
amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
			   int regnum, int *optimizedp,
			   enum lval_type *lvalp, CORE_ADDR *addrp,
			   int *realnump, void *valuep)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (next_frame, this_cache);

  gdb_assert (regnum >= 0);

  /* The previous frame's %rsp is computed, not stored anywhere.  */
  if (regnum == SP_REGNUM && cache->saved_sp)
    {
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = -1;
      if (valuep)
	{
	  /* Store the value.  */
	  store_unsigned_integer (valuep, 8, cache->saved_sp);
	}
      return;
    }

  /* Registers the prologue saved are found in memory.  */
  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    {
      *optimizedp = 0;
      *lvalp = lval_memory;
      *addrp = cache->saved_regs[regnum];
      *realnump = -1;
      if (valuep)
	{
	  /* Read the value in from memory.  */
	  read_memory (*addrp, valuep,
		       register_size (current_gdbarch, regnum));
	}
      return;
    }

  /* Anything else was not clobbered; pass the request down.  */
  frame_register_unwind (next_frame, regnum,
			 optimizedp, lvalp, addrp, realnump, valuep);
}
864
/* Unwinder for normal frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register
};
871
/* Catch-all sniffer: the normal frame unwinder handles any frame not
   claimed by an earlier sniffer.  */

static const struct frame_unwind *
amd64_frame_sniffer (struct frame_info *next_frame)
{
  return &amd64_frame_unwind;
}
877 \f
878
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

/* Build (or return the previously cached) unwind cache for a
   signal-trampoline frame.  Register save locations come from the
   signal context, whose address and per-register offsets are supplied
   by the OS-specific tdep.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
  CORE_ADDR addr;
  char buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();

  frame_unwind_register (next_frame, X86_64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  /* Locate each saved register inside the signal context.  */
  addr = tdep->sigcontext_addr (next_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;
  return cache;
}
912
913 static void
914 amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
915 void **this_cache, struct frame_id *this_id)
916 {
917 struct amd64_frame_cache *cache =
918 amd64_sigtramp_frame_cache (next_frame, this_cache);
919
920 (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
921 }
922
/* Implement "prev_register" for signal-trampoline frames by reusing
   amd64_frame_prev_register; only the cache construction differs.  */

static void
amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
				    void **this_cache,
				    int regnum, int *optimizedp,
				    enum lval_type *lvalp, CORE_ADDR *addrp,
				    int *realnump, void *valuep)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (next_frame, this_cache);

  amd64_frame_prev_register (next_frame, this_cache, regnum,
			     optimizedp, lvalp, addrp, realnump, valuep);
}
936
/* Unwinder for signal-trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register
};
943
/* Return the signal-trampoline unwinder if the PC of the frame above
   NEXT_FRAME lies within a signal trampoline, NULL otherwise.  */

static const struct frame_unwind *
amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
{
  CORE_ADDR pc = frame_pc_unwind (next_frame);
  char *name;

  find_pc_partial_function (pc, &name, NULL, NULL);
  if (PC_IN_SIGTRAMP (pc, name))
    {
      /* The OS-specific code must have provided sigcontext_addr, or
         the sigtramp cache cannot locate the saved registers.  */
      gdb_assert (gdbarch_tdep (current_gdbarch)->sigcontext_addr);

      return &amd64_sigtramp_frame_unwind;
    }

  return NULL;
}
960 \f
961
962 static CORE_ADDR
963 amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
964 {
965 struct amd64_frame_cache *cache =
966 amd64_frame_cache (next_frame, this_cache);
967
968 return cache->base;
969 }
970
/* Frame base methods: the base, locals and arguments addresses all
   coincide with the frame's base address.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
978
/* Build the frame ID for a dummy frame (a call injected by GDB): the
   inferior's %rbp plus 16 -- matching the "base + 16" convention of
   amd64_frame_this_id -- and the current PC.  */

static struct frame_id
amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  char buf[8];
  CORE_ADDR fp;

  frame_unwind_register (next_frame, X86_64_RBP_REGNUM, buf);
  fp = extract_unsigned_integer (buf, 8);

  return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
}
990
991 /* 16 byte align the SP per frame requirements. */
992
993 static CORE_ADDR
994 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
995 {
996 return sp & -(CORE_ADDR)16;
997 }
998 \f
999
/* Supply register REGNUM from the floating-point register set REGSET
   to register cache REGCACHE.  If REGNUM is -1, do this for all
   registers in REGSET.  FPREGS must be LEN bytes of `fxsave' data
   (LEN is checked against tdep->sizeof_fpregset).  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  const struct gdbarch_tdep *tdep = regset->descr;

  gdb_assert (len == tdep->sizeof_fpregset);
  x86_64_supply_fxsave (regcache, regnum, fpregs);
}
1013
/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  The floating-point section (".reg2")
   is handled here; everything else is delegated to the i386 code.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
				const char *sect_name, size_t sect_size)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
    {
      /* Create the regset lazily and cache it in TDEP.  */
      if (tdep->fpregset == NULL)
	{
	  tdep->fpregset = XMALLOC (struct regset);
	  tdep->fpregset->descr = tdep;
	  tdep->fpregset->supply_regset = amd64_supply_fpregset;
	}

      return tdep->fpregset;
    }

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
}
1037 \f
1038
/* Initialize the gdbarch for the AMD64 (nee x86-64) ABI.  Fills in
   TDEP fields and installs the architecture methods on GDBARCH.
   Called from OS-specific init routines, which may further override
   settings made here.  */

void
x86_64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = X86_64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, X86_64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, X86_64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, X86_64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, X86_64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  /* The System V psABI reserves a 128-byte red zone below %rsp.  */
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, amd64_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);
  /* Override, since this is handled by amd64_extract_return_value.  */
  set_gdbarch_extract_struct_value_address (gdbarch, NULL);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);

  /* FIXME: kettenis/20021026: This is ELF-specific.  Fine for now,
     since all supported AMD64 targets are ELF, but that might change
     in the future.  */
  set_gdbarch_in_solib_call_trampoline (gdbarch, in_plt_section);

  /* Sniffer order matters: the sigtramp sniffer must get a chance
     before the generic frame sniffer.  */
  frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
  frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);
}
1120 \f
1121
1122 #define I387_ST0_REGNUM X86_64_ST0_REGNUM
1123
1124 /* The 64-bit FXSAVE format differs from the 32-bit format in the
1125 sense that the instruction pointer and data pointer are simply
1126 64-bit offsets into the code segment and the data segment instead
1127 of a selector offset pair. The functions below store the upper 32
1128 bits of these pointers (instead of just the 16-bits of the segment
1129 selector). */
1130
1131 /* Fill register REGNUM in REGCACHE with the appropriate
1132 floating-point or SSE register value from *FXSAVE. If REGNUM is
1133 -1, do this for all registers. This function masks off any of the
1134 reserved bits in *FXSAVE. */
1135
1136 void
1137 x86_64_supply_fxsave (struct regcache *regcache, int regnum,
1138 const void *fxsave)
1139 {
1140 i387_supply_fxsave (regcache, regnum, fxsave);
1141
1142 if (fxsave)
1143 {
1144 const char *regs = fxsave;
1145
1146 if (regnum == -1 || regnum == I387_FISEG_REGNUM)
1147 regcache_raw_supply (regcache, I387_FISEG_REGNUM, regs + 12);
1148 if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
1149 regcache_raw_supply (regcache, I387_FOSEG_REGNUM, regs + 20);
1150 }
1151 }
1152
1153 /* Fill register REGNUM (if it is a floating-point or SSE register) in
1154 *FXSAVE with the value in GDB's register cache. If REGNUM is -1, do
1155 this for all registers. This function doesn't touch any of the
1156 reserved bits in *FXSAVE. */
1157
1158 void
1159 x86_64_fill_fxsave (char *fxsave, int regnum)
1160 {
1161 i387_fill_fxsave (fxsave, regnum);
1162
1163 if (regnum == -1 || regnum == I387_FISEG_REGNUM)
1164 regcache_collect (I387_FISEG_REGNUM, fxsave + 12);
1165 if (regnum == -1 || regnum == I387_FOSEG_REGNUM)
1166 regcache_collect (I387_FOSEG_REGNUM, fxsave + 20);
1167 }
This page took 0.053844 seconds and 5 git commands to generate.