X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Famd64-tdep.c;h=3e5d1bd8a7cdc62c87a0c77956500e162077b277;hb=4bd7dc42558fcf53bb0c783f852f03dcac38866f;hp=a4172fc2dc168d5624756c424d8245e4a7bb80f3;hpb=7f7930dd889bc0dcfd56b5dc362eb805b1f93560;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c index a4172fc2dc..3e5d1bd8a7 100644 --- a/gdb/amd64-tdep.c +++ b/gdb/amd64-tdep.c @@ -1,6 +1,6 @@ /* Target-dependent code for AMD64. - Copyright (C) 2001-2012 Free Software Foundation, Inc. + Copyright (C) 2001-2015 Free Software Foundation, Inc. Contributed by Jiri Smid, SuSE Labs. @@ -29,6 +29,7 @@ #include "frame-base.h" #include "frame-unwind.h" #include "inferior.h" +#include "infrun.h" #include "gdbcmd.h" #include "gdbcore.h" #include "objfiles.h" @@ -36,15 +37,17 @@ #include "regset.h" #include "symfile.h" #include "disasm.h" -#include "gdb_assert.h" -#include "exceptions.h" #include "amd64-tdep.h" #include "i387-tdep.h" #include "features/i386/amd64.c" #include "features/i386/amd64-avx.c" +#include "features/i386/amd64-mpx.c" +#include "features/i386/amd64-avx512.c" + #include "features/i386/x32.c" #include "features/i386/x32-avx.c" +#include "features/i386/x32-avx512.c" #include "ax.h" #include "ax-gdb.h" @@ -84,6 +87,14 @@ static const char *amd64_ymm_names[] = "ymm12", "ymm13", "ymm14", "ymm15" }; +static const char *amd64_ymm_avx512_names[] = +{ + "ymm16", "ymm17", "ymm18", "ymm19", + "ymm20", "ymm21", "ymm22", "ymm23", + "ymm24", "ymm25", "ymm26", "ymm27", + "ymm28", "ymm29", "ymm30", "ymm31" +}; + static const char *amd64_ymmh_names[] = { "ymm0h", "ymm1h", "ymm2h", "ymm3h", @@ -92,15 +103,54 @@ static const char *amd64_ymmh_names[] = "ymm12h", "ymm13h", "ymm14h", "ymm15h" }; -/* The registers used to pass integer arguments during a function call. */ -static int amd64_dummy_call_integer_regs[] = +static const char *amd64_ymmh_avx512_names[] = { - AMD64_RDI_REGNUM, /* %rdi */ - AMD64_RSI_REGNUM, /* %rsi */ - AMD64_RDX_REGNUM, /* %rdx */ - AMD64_RCX_REGNUM, /* %rcx */ - 8, /* %r8 */ - 9 /* %r9 */ + "ymm16h", "ymm17h", "ymm18h", "ymm19h", + "ymm20h", "ymm21h", "ymm22h", "ymm23h", + "ymm24h", "ymm25h", "ymm26h", "ymm27h", + "ymm28h", "ymm29h", "ymm30h", "ymm31h" +}; + +static const char *amd64_mpx_names[] = +{ + "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus" +}; + +static const char *amd64_k_names[] = +{ + "k0", "k1", "k2", "k3", + "k4", "k5", "k6", "k7" +}; + +static const char *amd64_zmmh_names[] = +{ + "zmm0h", "zmm1h", "zmm2h", "zmm3h", + "zmm4h", "zmm5h", "zmm6h", "zmm7h", + "zmm8h", "zmm9h", "zmm10h", "zmm11h", + "zmm12h", "zmm13h", "zmm14h", "zmm15h", + "zmm16h", "zmm17h", "zmm18h", "zmm19h", + "zmm20h", "zmm21h", "zmm22h", "zmm23h", + "zmm24h", "zmm25h", "zmm26h", "zmm27h", + "zmm28h", "zmm29h", "zmm30h", "zmm31h" +}; + +static const char *amd64_zmm_names[] = +{ + "zmm0", "zmm1", "zmm2", "zmm3", + "zmm4", "zmm5", "zmm6", "zmm7", + "zmm8", "zmm9", "zmm10", "zmm11", + "zmm12", "zmm13", "zmm14", "zmm15", + "zmm16", "zmm17", "zmm18", "zmm19", + "zmm20", "zmm21", "zmm22", "zmm23", + "zmm24", "zmm25", "zmm26", "zmm27", + "zmm28", "zmm29", "zmm30", "zmm31" +}; + +static const char *amd64_xmm_avx512_names[] = { + "xmm16", "xmm17", "xmm18", "xmm19", + "xmm20", "xmm21", "xmm22", "xmm23", + "xmm24", "xmm25", "xmm26", "xmm27", + "xmm28", "xmm29", "xmm30", "xmm31" }; /* DWARF Register Number Mapping as defined in the System V psABI, @@ -120,7 +170,14 @@ static int amd64_dwarf_regmap[] = AMD64_RSP_REGNUM, /* Extended Integer Registers 8 - 15. 
*/ - 8, 9, 10, 11, 12, 13, 14, 15, + AMD64_R8_REGNUM, /* %r8 */ + AMD64_R9_REGNUM, /* %r9 */ + AMD64_R10_REGNUM, /* %r10 */ + AMD64_R11_REGNUM, /* %r11 */ + AMD64_R12_REGNUM, /* %r12 */ + AMD64_R13_REGNUM, /* %r13 */ + AMD64_R14_REGNUM, /* %r14 */ + AMD64_R15_REGNUM, /* %r15 */ /* Return Address RA. Mapped to RIP. */ AMD64_RIP_REGNUM, @@ -142,7 +199,13 @@ static int amd64_dwarf_regmap[] = AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3, AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5, AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7, - + + /* MMX Registers 0 - 7. + We have to handle those registers specifically, as their register + number within GDB depends on the target (or they may even not be + available at all). */ + -1, -1, -1, -1, -1, -1, -1, -1, + /* Control and Status Flags Register. */ AMD64_EFLAGS_REGNUM, @@ -270,8 +333,12 @@ amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum) struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); if (i386_byte_regnum_p (gdbarch, regnum)) return amd64_byte_names[regnum - tdep->al_regnum]; + else if (i386_zmm_regnum_p (gdbarch, regnum)) + return amd64_zmm_names[regnum - tdep->zmm0_regnum]; else if (i386_ymm_regnum_p (gdbarch, regnum)) return amd64_ymm_names[regnum - tdep->ymm0_regnum]; + else if (i386_ymm_avx512_regnum_p (gdbarch, regnum)) + return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum]; else if (i386_word_regnum_p (gdbarch, regnum)) return amd64_word_names[regnum - tdep->ax_regnum]; else if (i386_dword_regnum_p (gdbarch, regnum)) @@ -391,6 +458,20 @@ amd64_pseudo_register_write (struct gdbarch *gdbarch, +/* Register classes as defined in the psABI. */ + +enum amd64_reg_class +{ + AMD64_INTEGER, + AMD64_SSE, + AMD64_SSEUP, + AMD64_X87, + AMD64_X87UP, + AMD64_COMPLEX_X87, + AMD64_NO_CLASS, + AMD64_MEMORY +}; + /* Return the union class of CLASS1 and CLASS2. See the psABI for details. */ @@ -427,6 +508,8 @@ amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2) return AMD64_SSE; } +static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]); + /* Return non-zero if TYPE is a non-POD structure or union type. */ static int @@ -444,19 +527,19 @@ amd64_non_pod_p (struct type *type) arrays) and union types, and store the result in CLASS. */ static void -amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2]) +amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2]) { /* 1. If the size of an object is larger than two eightbytes, or in C++, is a non-POD structure or union type, or contains unaligned fields, it has class memory. */ if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type)) { - class[0] = class[1] = AMD64_MEMORY; + theclass[0] = theclass[1] = AMD64_MEMORY; return; } /* 2. Both eightbytes get initialized to class NO_CLASS. */ - class[0] = class[1] = AMD64_NO_CLASS; + theclass[0] = theclass[1] = AMD64_NO_CLASS; /* 3. Each field of an object is classified recursively so that always two fields are considered. The resulting class is @@ -468,9 +551,9 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2]) struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type)); /* All fields in an array have the same type. 
*/ - amd64_classify (subtype, class); - if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS) - class[1] = class[0]; + amd64_classify (subtype, theclass); + if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS) + theclass[1] = theclass[0]; } else { @@ -499,7 +582,7 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2]) gdb_assert (pos == 0 || pos == 1); amd64_classify (subtype, subclass); - class[pos] = amd64_merge_classes (class[pos], subclass[0]); + theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]); if (bitsize <= 64 && pos == 0 && endpos == 1) /* This is a bit of an odd case: We have a field that would normally fit in one of the two eightbytes, except that @@ -523,9 +606,9 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2]) use up all 16 bytes of the aggregate, and are already handled just fine (because each portion sits on its own 8-byte). */ - class[1] = amd64_merge_classes (class[1], subclass[0]); + theclass[1] = amd64_merge_classes (theclass[1], subclass[0]); if (pos == 0) - class[1] = amd64_merge_classes (class[1], subclass[1]); + theclass[1] = amd64_merge_classes (theclass[1], subclass[1]); } } @@ -533,26 +616,26 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2]) /* Rule (a): If one of the classes is MEMORY, the whole argument is passed in memory. */ - if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY) - class[0] = class[1] = AMD64_MEMORY; + if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY) + theclass[0] = theclass[1] = AMD64_MEMORY; /* Rule (b): If SSEUP is not preceded by SSE, it is converted to SSE. */ - if (class[0] == AMD64_SSEUP) - class[0] = AMD64_SSE; - if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE) - class[1] = AMD64_SSE; + if (theclass[0] == AMD64_SSEUP) + theclass[0] = AMD64_SSE; + if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE) + theclass[1] = AMD64_SSE; } /* Classify TYPE, and store the result in CLASS. */ -void -amd64_classify (struct type *type, enum amd64_reg_class class[2]) +static void +amd64_classify (struct type *type, enum amd64_reg_class theclass[2]) { enum type_code code = TYPE_CODE (type); int len = TYPE_LENGTH (type); - class[0] = class[1] = AMD64_NO_CLASS; + theclass[0] = theclass[1] = AMD64_NO_CLASS; /* Arguments of types (signed and unsigned) _Bool, char, short, int, long, long long, and pointers are in the INTEGER class. Similarly, @@ -563,28 +646,28 @@ amd64_classify (struct type *type, enum amd64_reg_class class[2]) || code == TYPE_CODE_CHAR || code == TYPE_CODE_PTR || code == TYPE_CODE_REF) && (len == 1 || len == 2 || len == 4 || len == 8)) - class[0] = AMD64_INTEGER; + theclass[0] = AMD64_INTEGER; /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64 are in class SSE. */ else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT) && (len == 4 || len == 8)) /* FIXME: __m64 . */ - class[0] = AMD64_SSE; + theclass[0] = AMD64_SSE; /* Arguments of types __float128, _Decimal128 and __m128 are split into two halves. The least significant ones belong to class SSE, the most significant one to class SSEUP. */ else if (code == TYPE_CODE_DECFLOAT && len == 16) /* FIXME: __float128, __m128. */ - class[0] = AMD64_SSE, class[1] = AMD64_SSEUP; + theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP; /* The 64-bit mantissa of arguments of type long double belongs to class X87, the 16-bit exponent plus 6 bytes of padding belongs to class X87UP. 
*/ else if (code == TYPE_CODE_FLT && len == 16) /* Class X87 and X87UP. */ - class[0] = AMD64_X87, class[1] = AMD64_X87UP; + theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP; /* Arguments of complex T where T is one of the types float or double get treated as if they are implemented as: @@ -592,21 +675,23 @@ amd64_classify (struct type *type, enum amd64_reg_class class[2]) struct complexT { T real; T imag; - }; */ + }; + + */ else if (code == TYPE_CODE_COMPLEX && len == 8) - class[0] = AMD64_SSE; + theclass[0] = AMD64_SSE; else if (code == TYPE_CODE_COMPLEX && len == 16) - class[0] = class[1] = AMD64_SSE; + theclass[0] = theclass[1] = AMD64_SSE; /* A variable of type complex long double is classified as type COMPLEX_X87. */ else if (code == TYPE_CODE_COMPLEX && len == 32) - class[0] = AMD64_COMPLEX_X87; + theclass[0] = AMD64_COMPLEX_X87; /* Aggregates. */ else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT || code == TYPE_CODE_UNION) - amd64_classify_aggregate (type, class); + amd64_classify_aggregate (type, theclass); } static enum return_value_convention @@ -614,8 +699,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, struct type *type, struct regcache *regcache, gdb_byte *readbuf, const gdb_byte *writebuf) { - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - enum amd64_reg_class class[2]; + enum amd64_reg_class theclass[2]; int len = TYPE_LENGTH (type); static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM }; static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM }; @@ -624,10 +708,9 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, int i; gdb_assert (!(readbuf && writebuf)); - gdb_assert (tdep->classify); /* 1. Classify the return type with the classification algorithm. */ - tdep->classify (type, class); + amd64_classify (type, theclass); /* 2. If the type has class MEMORY, then the caller provides space for the return value and passes the address of this storage in @@ -636,7 +719,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, On return %rax will contain the address that has been passed in by the caller in %rdi. */ - if (class[0] == AMD64_MEMORY) + if (theclass[0] == AMD64_MEMORY) { /* As indicated by the comment above, the ABI guarantees that we can always find the return value just after the function has @@ -655,7 +738,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, /* 8. If the class is COMPLEX_X87, the real part of the value is returned in %st0 and the imaginary part in %st1. */ - if (class[0] == AMD64_COMPLEX_X87) + if (theclass[0] == AMD64_COMPLEX_X87) { if (readbuf) { @@ -677,7 +760,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, return RETURN_VALUE_REGISTER_CONVENTION; } - gdb_assert (class[1] != AMD64_MEMORY); + gdb_assert (theclass[1] != AMD64_MEMORY); gdb_assert (len <= 16); for (i = 0; len > 0; i++, len -= 8) @@ -685,7 +768,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, int regnum = -1; int offset = 0; - switch (class[i]) + switch (theclass[i]) { case AMD64_INTEGER: /* 3. If the class is INTEGER, the next available register @@ -718,7 +801,7 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, case AMD64_X87UP: /* 7. If the class is X87UP, the value is returned together with the previous X87 value in %st0. 
*/ - gdb_assert (i > 0 && class[0] == AMD64_X87); + gdb_assert (i > 0 && theclass[0] == AMD64_X87); regnum = AMD64_ST0_REGNUM; offset = 8; len = 2; @@ -749,11 +832,15 @@ static CORE_ADDR amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args, CORE_ADDR sp, int struct_return) { - struct gdbarch *gdbarch = get_regcache_arch (regcache); - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - int *integer_regs = tdep->call_dummy_integer_regs; - int num_integer_regs = tdep->call_dummy_num_integer_regs; - + static int integer_regnum[] = + { + AMD64_RDI_REGNUM, /* %rdi */ + AMD64_RSI_REGNUM, /* %rsi */ + AMD64_RDX_REGNUM, /* %rdx */ + AMD64_RCX_REGNUM, /* %rcx */ + AMD64_R8_REGNUM, /* %r8 */ + AMD64_R9_REGNUM /* %r9 */ + }; static int sse_regnum[] = { /* %xmm0 ... %xmm7 */ @@ -763,11 +850,6 @@ amd64_push_arguments (struct regcache *regcache, int nargs, AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7, }; struct value **stack_args = alloca (nargs * sizeof (struct value *)); - /* An array that mirrors the stack_args array. For all arguments - that are passed by MEMORY, if that argument's address also needs - to be stored in a register, the ARG_ADDR_REGNO array will contain - that register number (or a negative value otherwise). */ - int *arg_addr_regno = alloca (nargs * sizeof (int)); int num_stack_args = 0; int num_elements = 0; int element = 0; @@ -775,8 +857,6 @@ amd64_push_arguments (struct regcache *regcache, int nargs, int sse_reg = 0; int i; - gdb_assert (tdep->classify); - /* Reserve a register for the "hidden" argument. */ if (struct_return) integer_reg++; @@ -785,45 +865,33 @@ amd64_push_arguments (struct regcache *regcache, int nargs, { struct type *type = value_type (args[i]); int len = TYPE_LENGTH (type); - enum amd64_reg_class class[2]; + enum amd64_reg_class theclass[2]; int needed_integer_regs = 0; int needed_sse_regs = 0; int j; /* Classify argument. */ - tdep->classify (type, class); + amd64_classify (type, theclass); /* Calculate the number of integer and SSE registers needed for this argument. */ for (j = 0; j < 2; j++) { - if (class[j] == AMD64_INTEGER) + if (theclass[j] == AMD64_INTEGER) needed_integer_regs++; - else if (class[j] == AMD64_SSE) + else if (theclass[j] == AMD64_SSE) needed_sse_regs++; } /* Check whether enough registers are available, and if the argument should be passed in registers at all. */ - if (integer_reg + needed_integer_regs > num_integer_regs + if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum) || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum) || (needed_integer_regs == 0 && needed_sse_regs == 0)) { /* The argument will be passed on the stack. */ num_elements += ((len + 7) / 8); - stack_args[num_stack_args] = args[i]; - /* If this is an AMD64_MEMORY argument whose address must also - be passed in one of the integer registers, reserve that - register and associate this value to that register so that - we can store the argument address as soon as we know it. 
*/ - if (class[0] == AMD64_MEMORY - && tdep->memory_args_by_pointer - && integer_reg < tdep->call_dummy_num_integer_regs) - arg_addr_regno[num_stack_args] = - tdep->call_dummy_integer_regs[integer_reg++]; - else - arg_addr_regno[num_stack_args] = -1; - num_stack_args++; + stack_args[num_stack_args++] = args[i]; } else { @@ -838,10 +906,10 @@ amd64_push_arguments (struct regcache *regcache, int nargs, int regnum = -1; int offset = 0; - switch (class[j]) + switch (theclass[j]) { case AMD64_INTEGER: - regnum = integer_regs[integer_reg++]; + regnum = integer_regnum[integer_reg++]; break; case AMD64_SSE: @@ -878,20 +946,10 @@ amd64_push_arguments (struct regcache *regcache, int nargs, { struct type *type = value_type (stack_args[i]); const gdb_byte *valbuf = value_contents (stack_args[i]); - CORE_ADDR arg_addr = sp + element * 8; - - write_memory (arg_addr, valbuf, TYPE_LENGTH (type)); - if (arg_addr_regno[i] >= 0) - { - /* We also need to store the address of that argument in - the given register. */ - gdb_byte buf[8]; - enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - - store_unsigned_integer (buf, 8, byte_order, arg_addr); - regcache_cooked_write (regcache, arg_addr_regno[i], buf); - } - element += ((TYPE_LENGTH (type) + 7) / 8); + int len = TYPE_LENGTH (type); + + write_memory (sp + element * 8, valbuf, len); + element += ((len + 7) / 8); } /* The psABI says that "For calls that may call functions that use @@ -909,7 +967,6 @@ amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, int struct_return, CORE_ADDR struct_addr) { enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); gdb_byte buf[8]; /* Pass arguments. */ @@ -918,19 +975,10 @@ amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, /* Pass "hidden" argument". */ if (struct_return) { - /* The "hidden" argument is passed throught the first argument - register. */ - const int arg_regnum = tdep->call_dummy_integer_regs[0]; - store_unsigned_integer (buf, 8, byte_order, struct_addr); - regcache_cooked_write (regcache, arg_regnum, buf); + regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf); } - /* Reserve some memory on the stack for the integer-parameter registers, - if required by the ABI. */ - if (tdep->integer_param_regs_saved_in_caller_frame) - sp -= tdep->call_dummy_num_integer_regs * 8; - /* Store return address. */ sp -= 8; store_unsigned_integer (buf, 8, byte_order, bp_addr); @@ -1364,6 +1412,24 @@ amd64_absolute_jmp_p (const struct amd64_insn *details) return 0; } +/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */ + +static int +amd64_jmp_p (const struct amd64_insn *details) +{ + const gdb_byte *insn = &details->raw_insn[details->opcode_offset]; + + /* jump short, relative. */ + if (insn[0] == 0xeb) + return 1; + + /* jump near, relative. */ + if (insn[0] == 0xe9) + return 1; + + return amd64_absolute_jmp_p (details); +} + static int amd64_absolute_call_p (const struct amd64_insn *details) { @@ -1435,6 +1501,52 @@ amd64_syscall_p (const struct amd64_insn *details, int *lengthp) return 0; } +/* Classify the instruction at ADDR using PRED. + Throw an error if the memory can't be read. 
*/ + +static int +amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr, + int (*pred) (const struct amd64_insn *)) +{ + struct amd64_insn details; + gdb_byte *buf; + int len, classification; + + len = gdbarch_max_insn_length (gdbarch); + buf = alloca (len); + + read_code (addr, buf, len); + amd64_get_insn_details (buf, &details); + + classification = pred (&details); + + return classification; +} + +/* The gdbarch insn_is_call method. */ + +static int +amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr) +{ + return amd64_classify_insn_at (gdbarch, addr, amd64_call_p); +} + +/* The gdbarch insn_is_ret method. */ + +static int +amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr) +{ + return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p); +} + +/* The gdbarch insn_is_jump method. */ + +static int +amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr) +{ + return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p); +} + /* Fix up the state of registers and memory after having single-stepped a displaced instruction. */ @@ -1631,7 +1743,7 @@ amd64_relocate_instruction (struct gdbarch *gdbarch, /* Where "ret" in the original code will return to. */ ret_addr = oldloc + insn_length; push_buf[0] = 0x68; /* pushq $... */ - memcpy (&push_buf[1], &ret_addr, 4); + store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr); /* Push the push. */ append_insns (to, 5, push_buf); @@ -1780,7 +1892,7 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, int reg, r; int offset, offset_and; - if (target_read_memory (pc, buf, sizeof buf)) + if (target_read_code (pc, buf, sizeof buf)) return pc; /* Check caller-saved saved register. The first instruction has @@ -2123,7 +2235,7 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, else pc = amd64_analyze_stack_align (pc, current_pc, cache); - op = read_memory_unsigned_integer (pc, 1, byte_order); + op = read_code_unsigned_integer (pc, 1, byte_order); if (op == 0x55) /* pushq %rbp */ { @@ -2136,7 +2248,7 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, if (current_pc <= pc + 1) return current_pc; - read_memory (pc + 1, buf, 3); + read_code (pc + 1, buf, 3); /* Check for `movq %rsp, %rbp'. */ if (memcmp (buf, mov_rsp_rbp_1, 3) == 0 @@ -2195,7 +2307,8 @@ amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc) start_pc_sal = find_pc_sect_line (start_pc, NULL, 0); if (start_pc_sal.symtab == NULL - || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6 + || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER + (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end) return pc; @@ -2204,7 +2317,7 @@ amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc) return pc; /* START_PC can be from overlayed memory, ignored here. */ - if (target_read_memory (next_sal.pc - 4, buf, sizeof (buf)) != 0) + if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0) return pc; /* test %al,%al */ @@ -2252,6 +2365,22 @@ amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc) { struct amd64_frame_cache cache; CORE_ADDR pc; + CORE_ADDR func_addr; + + if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL)) + { + CORE_ADDR post_prologue_pc + = skip_prologue_using_sal (gdbarch, func_addr); + struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr); + + /* Clang always emits a line note before the prologue and another + one after. We trust clang to emit usable line notes. 
*/ + if (post_prologue_pc + && (cust != NULL + && COMPUNIT_PRODUCER (cust) != NULL + && startswith (COMPUNIT_PRODUCER (cust), "clang "))) + return max (start_pc, post_prologue_pc); + } amd64_init_frame_cache (&cache); pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL, @@ -2337,7 +2466,6 @@ amd64_frame_cache_1 (struct frame_info *this_frame, static struct amd64_frame_cache * amd64_frame_cache (struct frame_info *this_frame, void **this_cache) { - volatile struct gdb_exception ex; struct amd64_frame_cache *cache; if (*this_cache) @@ -2346,12 +2474,16 @@ amd64_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); *this_cache = cache; - TRY_CATCH (ex, RETURN_MASK_ERROR) + TRY { amd64_frame_cache_1 (this_frame, cache); } - if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + CATCH (ex, RETURN_MASK_ERROR) + { + if (ex.error != NOT_AVAILABLE_ERROR) + throw_exception (ex); + } + END_CATCH return cache; } @@ -2381,13 +2513,14 @@ amd64_frame_this_id (struct frame_info *this_frame, void **this_cache, amd64_frame_cache (this_frame, this_cache); if (!cache->base_p) - return; - - /* This marks the outermost frame. */ - if (cache->base == 0) - return; - - (*this_id) = frame_id_build (cache->base + 16, cache->pc); + (*this_id) = frame_id_build_unavailable_stack (cache->pc); + else if (cache->base == 0) + { + /* This marks the outermost frame. */ + return; + } + else + (*this_id) = frame_id_build (cache->base + 16, cache->pc); } static struct value * @@ -2449,7 +2582,6 @@ amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache) struct gdbarch *gdbarch = get_frame_arch (this_frame); struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - volatile struct gdb_exception ex; struct amd64_frame_cache *cache; CORE_ADDR addr; gdb_byte buf[8]; @@ -2460,7 +2592,7 @@ amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); - TRY_CATCH (ex, RETURN_MASK_ERROR) + TRY { get_frame_register (this_frame, AMD64_RSP_REGNUM, buf); cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8; @@ -2474,8 +2606,12 @@ amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache) cache->base_p = 1; } - if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + CATCH (ex, RETURN_MASK_ERROR) + { + if (ex.error != NOT_AVAILABLE_ERROR) + throw_exception (ex); + } + END_CATCH *this_cache = cache; return cache; @@ -2502,9 +2638,14 @@ amd64_sigtramp_frame_this_id (struct frame_info *this_frame, amd64_sigtramp_frame_cache (this_frame, this_cache); if (!cache->base_p) - return; - - (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame)); + (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame)); + else if (cache->base == 0) + { + /* This marks the outermost frame. 
*/ + return; + } + else + (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame)); } static struct value * @@ -2585,10 +2726,10 @@ static int amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc) { gdb_byte insn; - struct symtab *symtab; + struct compunit_symtab *cust; - symtab = find_pc_symtab (pc); - if (symtab && symtab->epilogue_unwind_valid) + cust = find_pc_compunit_symtab (pc); + if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust)) return 0; if (target_read_memory (pc, &insn, 1)) @@ -2617,7 +2758,6 @@ amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache) { struct gdbarch *gdbarch = get_frame_arch (this_frame); enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); - volatile struct gdb_exception ex; struct amd64_frame_cache *cache; gdb_byte buf[8]; @@ -2627,7 +2767,7 @@ amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); *this_cache = cache; - TRY_CATCH (ex, RETURN_MASK_ERROR) + TRY { /* Cache base will be %esp plus cache->sp_offset (-8). */ get_frame_register (this_frame, AMD64_RSP_REGNUM, buf); @@ -2645,8 +2785,12 @@ amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache) cache->base_p = 1; } - if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + CATCH (ex, RETURN_MASK_ERROR) + { + if (ex.error != NOT_AVAILABLE_ERROR) + throw_exception (ex); + } + END_CATCH return cache; } @@ -2673,9 +2817,9 @@ amd64_epilogue_frame_this_id (struct frame_info *this_frame, this_cache); if (!cache->base_p) - return; - - (*this_id) = frame_id_build (cache->base + 8, cache->pc); + (*this_id) = frame_id_build_unavailable_stack (cache->pc); + else + (*this_id) = frame_id_build (cache->base + 8, cache->pc); } static const struct frame_unwind amd64_epilogue_frame_unwind = @@ -2715,9 +2859,10 @@ static void amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache, int regnum, const void *fpregs, size_t len) { - const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch); + struct gdbarch *gdbarch = get_regcache_arch (regcache); + const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - gdb_assert (len == tdep->sizeof_fpregset); + gdb_assert (len >= tdep->sizeof_fpregset); amd64_supply_fxsave (regcache, regnum, fpregs); } @@ -2731,62 +2876,17 @@ amd64_collect_fpregset (const struct regset *regset, const struct regcache *regcache, int regnum, void *fpregs, size_t len) { - const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch); + struct gdbarch *gdbarch = get_regcache_arch (regcache); + const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - gdb_assert (len == tdep->sizeof_fpregset); + gdb_assert (len >= tdep->sizeof_fpregset); amd64_collect_fxsave (regcache, regnum, fpregs); } -/* Similar to amd64_supply_fpregset, but use XSAVE extended state. */ - -static void -amd64_supply_xstateregset (const struct regset *regset, - struct regcache *regcache, int regnum, - const void *xstateregs, size_t len) -{ - amd64_supply_xsave (regcache, regnum, xstateregs); -} - -/* Similar to amd64_collect_fpregset, but use XSAVE extended state. */ - -static void -amd64_collect_xstateregset (const struct regset *regset, - const struct regcache *regcache, - int regnum, void *xstateregs, size_t len) -{ - amd64_collect_xsave (regcache, regnum, xstateregs, 1); -} - -/* Return the appropriate register set for the core section identified - by SECT_NAME and SECT_SIZE. 
*/ - -static const struct regset * -amd64_regset_from_core_section (struct gdbarch *gdbarch, - const char *sect_name, size_t sect_size) -{ - struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - - if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset) - { - if (tdep->fpregset == NULL) - tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset, - amd64_collect_fpregset); - - return tdep->fpregset; - } - - if (strcmp (sect_name, ".reg-xstate") == 0) - { - if (tdep->xstateregset == NULL) - tdep->xstateregset = regset_alloc (gdbarch, - amd64_supply_xstateregset, - amd64_collect_xstateregset); - - return tdep->xstateregset; - } - - return i386_regset_from_core_section (gdbarch, sect_name, sect_size); -} +const struct regset amd64_fpregset = + { + NULL, amd64_supply_fpregset, amd64_collect_fpregset + }; /* Figure out where the longjmp will land. Slurp the jmp_buf out of @@ -2835,10 +2935,17 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) { struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); const struct target_desc *tdesc = info.target_desc; + static const char *const stap_integer_prefixes[] = { "$", NULL }; + static const char *const stap_register_prefixes[] = { "%", NULL }; + static const char *const stap_register_indirection_prefixes[] = { "(", + NULL }; + static const char *const stap_register_indirection_suffixes[] = { ")", + NULL }; /* AMD64 generally uses `fxsave' instead of `fsave' for saving its floating-point registers. */ tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE; + tdep->fpregset = &amd64_fpregset; if (! tdesc_has_registers (tdesc)) tdesc = tdesc_amd64; @@ -2847,6 +2954,23 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS; tdep->register_names = amd64_register_names; + if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL) + { + tdep->zmmh_register_names = amd64_zmmh_names; + tdep->k_register_names = amd64_k_names; + tdep->xmm_avx512_register_names = amd64_xmm_avx512_names; + tdep->ymm16h_register_names = amd64_ymmh_avx512_names; + + tdep->num_zmm_regs = 32; + tdep->num_xmm_avx512_regs = 16; + tdep->num_ymm_avx512_regs = 16; + + tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM; + tdep->k0_regnum = AMD64_K0_REGNUM; + tdep->xmm16_regnum = AMD64_XMM16_REGNUM; + tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM; + } + if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL) { tdep->ymmh_register_names = amd64_ymmh_names; @@ -2854,6 +2978,13 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM; } + if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL) + { + tdep->mpx_register_names = amd64_mpx_names; + tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM; + tdep->bnd0r_regnum = AMD64_BND0R_REGNUM; + } + tdep->num_byte_regs = 20; tdep->num_word_regs = 16; tdep->num_dword_regs = 16; @@ -2905,10 +3036,6 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call); set_gdbarch_frame_align (gdbarch, amd64_frame_align); set_gdbarch_frame_red_zone_size (gdbarch, 128); - tdep->call_dummy_num_integer_regs = - ARRAY_SIZE (amd64_dummy_call_integer_regs); - tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs; - tdep->classify = amd64_classify; set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p); set_gdbarch_register_to_value (gdbarch, i387_register_to_value); @@ -2932,11 +3059,6 @@ amd64_init_abi (struct gdbarch_info info, 
struct gdbarch *gdbarch) frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind); frame_base_set_default (gdbarch, &amd64_frame_base); - /* If we have a register mapping, enable the generic core file support. */ - if (tdep->gregset_reg_offset) - set_gdbarch_regset_from_core_section (gdbarch, - amd64_regset_from_core_section); - set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target); set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction); @@ -2944,14 +3066,19 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch) set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address); /* SystemTap variables and functions. */ - set_gdbarch_stap_integer_prefix (gdbarch, "$"); - set_gdbarch_stap_register_prefix (gdbarch, "%"); - set_gdbarch_stap_register_indirection_prefix (gdbarch, "("); - set_gdbarch_stap_register_indirection_suffix (gdbarch, ")"); + set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes); + set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes); + set_gdbarch_stap_register_indirection_prefixes (gdbarch, + stap_register_indirection_prefixes); + set_gdbarch_stap_register_indirection_suffixes (gdbarch, + stap_register_indirection_suffixes); set_gdbarch_stap_is_single_operand (gdbarch, i386_stap_is_single_operand); set_gdbarch_stap_parse_special_token (gdbarch, i386_stap_parse_special_token); + set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call); + set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret); + set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump); } @@ -2999,8 +3126,12 @@ _initialize_amd64_tdep (void) { initialize_tdesc_amd64 (); initialize_tdesc_amd64_avx (); + initialize_tdesc_amd64_mpx (); + initialize_tdesc_amd64_avx512 (); + initialize_tdesc_x32 (); initialize_tdesc_x32_avx (); + initialize_tdesc_x32_avx512 (); }
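
As an illustration of the System V psABI classification that amd64_classify () and amd64_push_arguments () implement above (a sketch, not part of amd64-tdep.c; the type and function names below are hypothetical): for a 16-byte aggregate the classifier assigns one class per eightbyte, and the call and return code then draw from integer_regnum[] and sse_regnum[] accordingly.

/* Hypothetical caller-side view; names are illustrative only.  */
struct pair
{
  long i;	/* Eightbyte 0 -> AMD64_INTEGER.  */
  double d;	/* Eightbyte 1 -> AMD64_SSE.  */
};

extern struct pair flip (struct pair p);

/* Assuming P is the first argument and no hidden struct-return pointer
   is in use, amd64_push_arguments () copies eightbyte 0 of P into %rdi
   (integer_regnum[0]) and eightbyte 1 into %xmm0, and
   amd64_return_value () reads the result back from %rax and %xmm0.  */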
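
The new gdbarch insn_is_jump hook funnels through amd64_classify_insn_at (), which reads up to gdbarch_max_insn_length () bytes of code and applies a predicate such as amd64_jmp_p ().  Below is a minimal standalone sketch of the relative-jump opcode test that predicate performs (host-side illustration only, not GDB code; the real function also defers to amd64_absolute_jmp_p () for indirect jumps).

#include <stdio.h>

static int
is_relative_jmp (const unsigned char *insn)
{
  /* jmp rel8 (0xeb) or jmp rel32 (0xe9), as in amd64_jmp_p ().  */
  return insn[0] == 0xeb || insn[0] == 0xe9;
}

int
main (void)
{
  static const unsigned char jmp_short[] = { 0xeb, 0x10 };
  static const unsigned char nop[] = { 0x90 };

  /* Prints "1 0".  */
  printf ("%d %d\n", is_relative_jmp (jmp_short), is_relative_jmp (nop));
  return 0;
}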
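
The amd64_relocate_instruction () hunk replaces a raw memcpy of RET_ADDR into the pushq immediate with store_unsigned_integer (), because the imm32 operand of the 0x68 pushq opcode must be encoded little-endian regardless of the host byte order.  A standalone sketch of that serialization follows (illustration only; GDB's store_unsigned_integer () additionally takes the byte order as an argument).

#include <stdio.h>

static void
store_le32 (unsigned char *buf, unsigned long val)
{
  int i;

  /* Emit the low 32 bits of VAL least-significant byte first.  */
  for (i = 0; i < 4; i++)
    buf[i] = (val >> (i * 8)) & 0xff;
}

int
main (void)
{
  unsigned char push_buf[5] = { 0x68 };	/* pushq $imm32 */

  store_le32 (&push_buf[1], 0x00401234);
  /* Prints "68 34 12 40 00".  */
  printf ("%02x %02x %02x %02x %02x\n",
	  push_buf[0], push_buf[1], push_buf[2], push_buf[3], push_buf[4]);
  return 0;
}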