X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=gdb%2Fdwarf2expr.c;h=b9ae108c5ffecd3a943711d5a52d1de66363e433;hb=ac56253ddece35aff4402b848f88ba40856102b1;hp=1296633a453c0d65d15416a82376b26f09026351;hpb=ef0fdf07a40a2097075d60d745150408f0ea9c30;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/dwarf2expr.c b/gdb/dwarf2expr.c index 1296633a45..b9ae108c5f 100644 --- a/gdb/dwarf2expr.c +++ b/gdb/dwarf2expr.c @@ -1,6 +1,7 @@ /* DWARF 2 Expression Evaluator. - Copyright 2001, 2002, 2003, 2005 Free Software Foundation, Inc. + Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010 + Free Software Foundation, Inc. Contributed by Daniel Berlin (dan@dberlin.org) @@ -8,7 +9,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or + the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, @@ -17,22 +18,21 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. */ + along with this program. If not, see . */ #include "defs.h" #include "symtab.h" #include "gdbtypes.h" #include "value.h" #include "gdbcore.h" -#include "elf/dwarf2.h" +#include "dwarf2.h" #include "dwarf2expr.h" +#include "gdb_assert.h" /* Local prototypes. */ static void execute_stack_op (struct dwarf_expr_context *, - gdb_byte *, gdb_byte *); + const gdb_byte *, const gdb_byte *); /* Create a new context for the expression evaluator. */ @@ -40,12 +40,15 @@ struct dwarf_expr_context * new_dwarf_expr_context (void) { struct dwarf_expr_context *retval; + retval = xcalloc (1, sizeof (struct dwarf_expr_context)); retval->stack_len = 0; retval->stack_allocated = 10; - retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR)); + retval->stack = xmalloc (retval->stack_allocated + * sizeof (struct dwarf_stack_value)); retval->num_pieces = 0; retval->pieces = 0; + retval->max_recursion_depth = 0x100; return retval; } @@ -59,6 +62,22 @@ free_dwarf_expr_context (struct dwarf_expr_context *ctx) xfree (ctx); } +/* Helper for make_cleanup_free_dwarf_expr_context. */ + +static void +free_dwarf_expr_context_cleanup (void *arg) +{ + free_dwarf_expr_context (arg); +} + +/* Return a cleanup that calls free_dwarf_expr_context. */ + +struct cleanup * +make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx) +{ + return make_cleanup (free_dwarf_expr_context_cleanup, ctx); +} + /* Expand the memory allocated to CTX's stack to contain at least NEED more elements than are currently used. */ @@ -68,8 +87,9 @@ dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) if (ctx->stack_len + need > ctx->stack_allocated) { size_t newlen = ctx->stack_len + need + 10; + ctx->stack = xrealloc (ctx->stack, - newlen * sizeof (CORE_ADDR)); + newlen * sizeof (struct dwarf_stack_value)); ctx->stack_allocated = newlen; } } @@ -77,10 +97,20 @@ dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) /* Push VALUE onto CTX's stack. 
*/ void -dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value) +dwarf_expr_push (struct dwarf_expr_context *ctx, ULONGEST value, + int in_stack_memory) { + struct dwarf_stack_value *v; + + /* We keep all stack elements within the range defined by the + DWARF address size. */ + if (ctx->addr_size < sizeof (ULONGEST)) + value &= ((ULONGEST) 1 << (ctx->addr_size * HOST_CHAR_BIT)) - 1; + dwarf_expr_grow_stack (ctx, 1); - ctx->stack[ctx->stack_len++] = value; + v = &ctx->stack[ctx->stack_len++]; + v->value = value; + v->in_stack_memory = in_stack_memory; } /* Pop the top item off of CTX's stack. */ @@ -95,54 +125,142 @@ dwarf_expr_pop (struct dwarf_expr_context *ctx) /* Retrieve the N'th item on CTX's stack. */ -CORE_ADDR +ULONGEST dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n) { if (ctx->stack_len <= n) error (_("Asked for position %d of stack, stack only has %d elements on it."), n, ctx->stack_len); - return ctx->stack[ctx->stack_len - (1 + n)]; + return ctx->stack[ctx->stack_len - (1 + n)].value; + +} +/* Retrieve the N'th item on CTX's stack, converted to an address. */ + +CORE_ADDR +dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n) +{ + ULONGEST result = dwarf_expr_fetch (ctx, n); + + /* For most architectures, calling extract_unsigned_integer() alone + is sufficient for extracting an address. However, some + architectures (e.g. MIPS) use signed addresses and using + extract_unsigned_integer() will not produce a correct + result. Make sure we invoke gdbarch_integer_to_address() + for those architectures which require it. */ + if (gdbarch_integer_to_address_p (ctx->gdbarch)) + { + enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); + gdb_byte *buf = alloca (ctx->addr_size); + struct type *int_type; + + switch (ctx->addr_size) + { + case 2: + int_type = builtin_type (ctx->gdbarch)->builtin_uint16; + break; + case 4: + int_type = builtin_type (ctx->gdbarch)->builtin_uint32; + break; + case 8: + int_type = builtin_type (ctx->gdbarch)->builtin_uint64; + break; + default: + internal_error (__FILE__, __LINE__, + _("Unsupported address size.\n")); + } + + store_unsigned_integer (buf, ctx->addr_size, byte_order, result); + return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf); + } + + return (CORE_ADDR) result; +} + +/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */ + +int +dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n) +{ + if (ctx->stack_len <= n) + error (_("Asked for position %d of stack, stack only has %d elements on it."), + n, ctx->stack_len); + return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory; + +} + +/* Return true if the expression stack is empty. */ + +static int +dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx) +{ + return ctx->stack_len == 0; } /* Add a new piece to CTX's piece list. 
*/ static void -add_piece (struct dwarf_expr_context *ctx, - int in_reg, CORE_ADDR value, ULONGEST size) +add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset) { struct dwarf_expr_piece *p; ctx->num_pieces++; - if (ctx->pieces) - ctx->pieces = xrealloc (ctx->pieces, - (ctx->num_pieces - * sizeof (struct dwarf_expr_piece))); - else - ctx->pieces = xmalloc (ctx->num_pieces - * sizeof (struct dwarf_expr_piece)); + ctx->pieces = xrealloc (ctx->pieces, + (ctx->num_pieces + * sizeof (struct dwarf_expr_piece))); p = &ctx->pieces[ctx->num_pieces - 1]; - p->in_reg = in_reg; - p->value = value; + p->location = ctx->location; p->size = size; + p->offset = offset; + + if (p->location == DWARF_VALUE_LITERAL) + { + p->v.literal.data = ctx->data; + p->v.literal.length = ctx->len; + } + else if (dwarf_expr_stack_empty_p (ctx)) + { + p->location = DWARF_VALUE_OPTIMIZED_OUT; + /* Also reset the context's location, for our callers. This is + a somewhat strange approach, but this lets us avoid setting + the location to DWARF_VALUE_MEMORY in all the individual + cases in the evaluator. */ + ctx->location = DWARF_VALUE_OPTIMIZED_OUT; + } + else if (p->location == DWARF_VALUE_MEMORY) + { + p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0); + p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); + } + else + { + p->v.value = dwarf_expr_fetch (ctx, 0); + } } /* Evaluate the expression at ADDR (LEN bytes long) using the context CTX. */ void -dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len) +dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr, + size_t len) { + int old_recursion_depth = ctx->recursion_depth; + execute_stack_op (ctx, addr, addr + len); + + /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */ + + gdb_assert (ctx->recursion_depth == old_recursion_depth); } /* Decode the unsigned LEB128 constant at BUF into the variable pointed to by R, and return the new value of BUF. Verify that it doesn't extend past BUF_END. */ -gdb_byte * -read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r) +const gdb_byte * +read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r) { unsigned shift = 0; ULONGEST result = 0; @@ -167,8 +285,8 @@ read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r) by R, and return the new value of BUF. Verify that it doesn't extend past BUF_END. */ -gdb_byte * -read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r) +const gdb_byte * +read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r) { unsigned shift = 0; LONGEST result = 0; @@ -191,80 +309,58 @@ read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r) *r = result; return buf; } + -/* Read an address from BUF, and verify that it doesn't extend past - BUF_END. The address is returned, and *BYTES_READ is set to the - number of bytes read from BUF. */ - -CORE_ADDR -dwarf2_read_address (gdb_byte *buf, gdb_byte *buf_end, int *bytes_read) -{ - CORE_ADDR result; - - if (buf_end - buf < TARGET_ADDR_BIT / TARGET_CHAR_BIT) - error (_("dwarf2_read_address: Corrupted DWARF expression.")); - - *bytes_read = TARGET_ADDR_BIT / TARGET_CHAR_BIT; - /* NOTE: cagney/2003-05-22: This extract is assuming that a DWARF 2 - address is always unsigned. That may or may not be true. */ - result = extract_unsigned_integer (buf, TARGET_ADDR_BIT / TARGET_CHAR_BIT); - return result; -} - -/* Return the type of an address, for unsigned arithmetic. 
*/ +/* Check that the current operator is either at the end of an + expression, or that it is followed by a composition operator. */ -static struct type * -unsigned_address_type (void) +void +dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end, + const char *op_name) { - switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT) - { - case 2: - return builtin_type_uint16; - case 4: - return builtin_type_uint32; - case 8: - return builtin_type_uint64; - default: - internal_error (__FILE__, __LINE__, - _("Unsupported address size.\n")); - } + /* It seems like DW_OP_GNU_uninit should be handled here. However, + it doesn't seem to make sense for DW_OP_*_value, and it was not + checked at the other place that this function is called. */ + if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece) + error (_("DWARF-2 expression error: `%s' operations must be " + "used either alone or in conjuction with DW_OP_piece " + "or DW_OP_bit_piece."), + op_name); } -/* Return the type of an address, for signed arithmetic. */ - -static struct type * -signed_address_type (void) -{ - switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT) - { - case 2: - return builtin_type_int16; - case 4: - return builtin_type_int32; - case 8: - return builtin_type_int64; - default: - internal_error (__FILE__, __LINE__, - _("Unsupported address size.\n")); - } -} - /* The engine for the expression evaluator. Using the context in CTX, evaluate the expression between OP_PTR and OP_END. */ static void execute_stack_op (struct dwarf_expr_context *ctx, - gdb_byte *op_ptr, gdb_byte *op_end) + const gdb_byte *op_ptr, const gdb_byte *op_end) { - ctx->in_reg = 0; +#define sign_ext(x) ((LONGEST) (((x) ^ sign_bit) - sign_bit)) + ULONGEST sign_bit = (ctx->addr_size >= sizeof (ULONGEST) ? 0 + : ((ULONGEST) 1) << (ctx->addr_size * 8 - 1)); + enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); + + ctx->location = DWARF_VALUE_MEMORY; + ctx->initialized = 1; /* Default is initialized. */ + + if (ctx->recursion_depth > ctx->max_recursion_depth) + error (_("DWARF-2 expression error: Loop detected (%d)."), + ctx->recursion_depth); + ctx->recursion_depth++; while (op_ptr < op_end) { enum dwarf_location_atom op = *op_ptr++; - CORE_ADDR result; + ULONGEST result; + /* Assume the value is not in stack memory. + Code that knows otherwise sets this to 1. + Some arithmetic on stack addresses can probably be assumed to still + be a stack address, but we skip this complication for now. + This is just an optimization, so it's always ok to punt + and leave this as 0. */ + int in_stack_memory = 0; ULONGEST uoffset, reg; LONGEST offset; - int bytes_read; switch (op) { @@ -304,40 +400,47 @@ execute_stack_op (struct dwarf_expr_context *ctx, break; case DW_OP_addr: - result = dwarf2_read_address (op_ptr, op_end, &bytes_read); - op_ptr += bytes_read; + result = extract_unsigned_integer (op_ptr, + ctx->addr_size, byte_order); + op_ptr += ctx->addr_size; + /* Some versions of GCC emit DW_OP_addr before + DW_OP_GNU_push_tls_address. In this case the value is an + index, not an address. We don't support things like + branching between the address and the TLS op. 
*/ + if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address) + result += ctx->offset; break; case DW_OP_const1u: - result = extract_unsigned_integer (op_ptr, 1); + result = extract_unsigned_integer (op_ptr, 1, byte_order); op_ptr += 1; break; case DW_OP_const1s: - result = extract_signed_integer (op_ptr, 1); + result = extract_signed_integer (op_ptr, 1, byte_order); op_ptr += 1; break; case DW_OP_const2u: - result = extract_unsigned_integer (op_ptr, 2); + result = extract_unsigned_integer (op_ptr, 2, byte_order); op_ptr += 2; break; case DW_OP_const2s: - result = extract_signed_integer (op_ptr, 2); + result = extract_signed_integer (op_ptr, 2, byte_order); op_ptr += 2; break; case DW_OP_const4u: - result = extract_unsigned_integer (op_ptr, 4); + result = extract_unsigned_integer (op_ptr, 4, byte_order); op_ptr += 4; break; case DW_OP_const4s: - result = extract_signed_integer (op_ptr, 4); + result = extract_signed_integer (op_ptr, 4, byte_order); op_ptr += 4; break; case DW_OP_const8u: - result = extract_unsigned_integer (op_ptr, 8); + result = extract_unsigned_integer (op_ptr, 8, byte_order); op_ptr += 8; break; case DW_OP_const8s: - result = extract_signed_integer (op_ptr, 8); + result = extract_signed_integer (op_ptr, 8, byte_order); op_ptr += 8; break; case DW_OP_constu: @@ -383,25 +486,47 @@ execute_stack_op (struct dwarf_expr_context *ctx, case DW_OP_reg29: case DW_OP_reg30: case DW_OP_reg31: - if (op_ptr != op_end && *op_ptr != DW_OP_piece) + if (op_ptr != op_end + && *op_ptr != DW_OP_piece + && *op_ptr != DW_OP_bit_piece + && *op_ptr != DW_OP_GNU_uninit) error (_("DWARF-2 expression error: DW_OP_reg operations must be " - "used either alone or in conjuction with DW_OP_piece.")); + "used either alone or in conjuction with DW_OP_piece " + "or DW_OP_bit_piece.")); result = op - DW_OP_reg0; - ctx->in_reg = 1; - + ctx->location = DWARF_VALUE_REGISTER; break; case DW_OP_regx: op_ptr = read_uleb128 (op_ptr, op_end, ®); - if (op_ptr != op_end && *op_ptr != DW_OP_piece) - error (_("DWARF-2 expression error: DW_OP_reg operations must be " - "used either alone or in conjuction with DW_OP_piece.")); + dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx"); result = reg; - ctx->in_reg = 1; + ctx->location = DWARF_VALUE_REGISTER; break; + case DW_OP_implicit_value: + { + ULONGEST len; + + op_ptr = read_uleb128 (op_ptr, op_end, &len); + if (op_ptr + len > op_end) + error (_("DW_OP_implicit_value: too few bytes available.")); + ctx->len = len; + ctx->data = op_ptr; + ctx->location = DWARF_VALUE_LITERAL; + op_ptr += len; + dwarf_expr_require_composition (op_ptr, op_end, + "DW_OP_implicit_value"); + } + goto no_push; + + case DW_OP_stack_value: + ctx->location = DWARF_VALUE_STACK; + dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value"); + goto no_push; + case DW_OP_breg0: case DW_OP_breg1: case DW_OP_breg2: @@ -450,7 +575,7 @@ execute_stack_op (struct dwarf_expr_context *ctx, break; case DW_OP_fbreg: { - gdb_byte *datastart; + const gdb_byte *datastart; size_t datalen; unsigned int before_stack_len; @@ -465,16 +590,22 @@ execute_stack_op (struct dwarf_expr_context *ctx, specific this_base method. 
*/ (ctx->get_frame_base) (ctx->baton, &datastart, &datalen); dwarf_expr_eval (ctx, datastart, datalen); - result = dwarf_expr_fetch (ctx, 0); - if (ctx->in_reg) - result = (ctx->read_reg) (ctx->baton, result); + if (ctx->location == DWARF_VALUE_MEMORY) + result = dwarf_expr_fetch_address (ctx, 0); + else if (ctx->location == DWARF_VALUE_REGISTER) + result = (ctx->read_reg) (ctx->baton, dwarf_expr_fetch (ctx, 0)); + else + error (_("Not implemented: computing frame base using explicit value operator")); result = result + offset; + in_stack_memory = 1; ctx->stack_len = before_stack_len; - ctx->in_reg = 0; + ctx->location = DWARF_VALUE_MEMORY; } break; + case DW_OP_dup: result = dwarf_expr_fetch (ctx, 0); + in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); break; case DW_OP_drop: @@ -484,15 +615,31 @@ execute_stack_op (struct dwarf_expr_context *ctx, case DW_OP_pick: offset = *op_ptr++; result = dwarf_expr_fetch (ctx, offset); + in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset); break; + + case DW_OP_swap: + { + struct dwarf_stack_value t1, t2; + + if (ctx->stack_len < 2) + error (_("Not enough elements for DW_OP_swap. Need 2, have %d."), + ctx->stack_len); + t1 = ctx->stack[ctx->stack_len - 1]; + t2 = ctx->stack[ctx->stack_len - 2]; + ctx->stack[ctx->stack_len - 1] = t2; + ctx->stack[ctx->stack_len - 2] = t1; + goto no_push; + } case DW_OP_over: result = dwarf_expr_fetch (ctx, 1); + in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1); break; case DW_OP_rot: { - CORE_ADDR t1, t2, t3; + struct dwarf_stack_value t1, t2, t3; if (ctx->stack_len < 3) error (_("Not enough elements for DW_OP_rot. Need 3, have %d."), @@ -508,6 +655,17 @@ execute_stack_op (struct dwarf_expr_context *ctx, case DW_OP_deref: case DW_OP_deref_size: + { + int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++); + gdb_byte *buf = alloca (addr_size); + CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0); + dwarf_expr_pop (ctx); + + (ctx->read_mem) (ctx->baton, buf, addr, addr_size); + result = extract_unsigned_integer (buf, addr_size, byte_order); + break; + } + case DW_OP_abs: case DW_OP_neg: case DW_OP_not: @@ -518,35 +676,8 @@ execute_stack_op (struct dwarf_expr_context *ctx, switch (op) { - case DW_OP_deref: - { - gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT); - int bytes_read; - - (ctx->read_mem) (ctx->baton, buf, result, - TARGET_ADDR_BIT / TARGET_CHAR_BIT); - result = dwarf2_read_address (buf, - buf + (TARGET_ADDR_BIT - / TARGET_CHAR_BIT), - &bytes_read); - } - break; - - case DW_OP_deref_size: - { - gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT); - int bytes_read; - - (ctx->read_mem) (ctx->baton, buf, result, *op_ptr++); - result = dwarf2_read_address (buf, - buf + (TARGET_ADDR_BIT - / TARGET_CHAR_BIT), - &bytes_read); - } - break; - case DW_OP_abs: - if ((signed int) result < 0) + if (sign_ext (result) < 0) result = -result; break; case DW_OP_neg: @@ -580,11 +711,8 @@ execute_stack_op (struct dwarf_expr_context *ctx, case DW_OP_gt: case DW_OP_ne: { - /* Binary operations. Use the value engine to do computations in - the right width. */ - CORE_ADDR first, second; - enum exp_opcode binop; - struct value *val1, *val2; + /* Binary operations. 
*/ + ULONGEST first, second; second = dwarf_expr_fetch (ctx, 0); dwarf_expr_pop (ctx); @@ -592,71 +720,75 @@ execute_stack_op (struct dwarf_expr_context *ctx, first = dwarf_expr_fetch (ctx, 0); dwarf_expr_pop (ctx); - val1 = value_from_longest (unsigned_address_type (), first); - val2 = value_from_longest (unsigned_address_type (), second); - switch (op) { case DW_OP_and: - binop = BINOP_BITWISE_AND; + result = first & second; break; case DW_OP_div: - binop = BINOP_DIV; + if (!second) + error (_("Division by zero")); + result = sign_ext (first) / sign_ext (second); break; case DW_OP_minus: - binop = BINOP_SUB; + result = first - second; break; case DW_OP_mod: - binop = BINOP_MOD; + if (!second) + error (_("Division by zero")); + result = first % second; break; case DW_OP_mul: - binop = BINOP_MUL; + result = first * second; break; case DW_OP_or: - binop = BINOP_BITWISE_IOR; + result = first | second; break; case DW_OP_plus: - binop = BINOP_ADD; + result = first + second; break; case DW_OP_shl: - binop = BINOP_LSH; + result = first << second; break; case DW_OP_shr: - binop = BINOP_RSH; + result = first >> second; break; case DW_OP_shra: - binop = BINOP_RSH; - val1 = value_from_longest (signed_address_type (), first); + result = sign_ext (first) >> second; break; case DW_OP_xor: - binop = BINOP_BITWISE_XOR; + result = first ^ second; break; case DW_OP_le: - binop = BINOP_LEQ; + result = sign_ext (first) <= sign_ext (second); break; case DW_OP_ge: - binop = BINOP_GEQ; + result = sign_ext (first) >= sign_ext (second); break; case DW_OP_eq: - binop = BINOP_EQUAL; + result = sign_ext (first) == sign_ext (second); break; case DW_OP_lt: - binop = BINOP_LESS; + result = sign_ext (first) < sign_ext (second); break; case DW_OP_gt: - binop = BINOP_GTR; + result = sign_ext (first) > sign_ext (second); break; case DW_OP_ne: - binop = BINOP_NOTEQUAL; + result = sign_ext (first) != sign_ext (second); break; default: internal_error (__FILE__, __LINE__, _("Can't be reached.")); } - result = value_as_long (value_binop (val1, val2, binop)); } break; + case DW_OP_call_frame_cfa: + result = (ctx->get_frame_cfa) (ctx->baton); + in_stack_memory = 1; + break; + case DW_OP_GNU_push_tls_address: /* Variable is at a constant offset in the thread-local storage block into the objfile for the current thread and @@ -672,13 +804,13 @@ execute_stack_op (struct dwarf_expr_context *ctx, break; case DW_OP_skip: - offset = extract_signed_integer (op_ptr, 2); + offset = extract_signed_integer (op_ptr, 2, byte_order); op_ptr += 2; op_ptr += offset; goto no_push; case DW_OP_bra: - offset = extract_signed_integer (op_ptr, 2); + offset = extract_signed_integer (op_ptr, 2, byte_order); op_ptr += 2; if (dwarf_expr_fetch (ctx, 0) != 0) op_ptr += offset; @@ -691,25 +823,68 @@ execute_stack_op (struct dwarf_expr_context *ctx, case DW_OP_piece: { ULONGEST size; - CORE_ADDR addr_or_regnum; /* Record the piece. */ op_ptr = read_uleb128 (op_ptr, op_end, &size); - addr_or_regnum = dwarf_expr_fetch (ctx, 0); - add_piece (ctx, ctx->in_reg, addr_or_regnum, size); - - /* Pop off the address/regnum, and clear the in_reg flag. */ - dwarf_expr_pop (ctx); - ctx->in_reg = 0; + add_piece (ctx, 8 * size, 0); + + /* Pop off the address/regnum, and reset the location + type. */ + if (ctx->location != DWARF_VALUE_LITERAL + && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) + dwarf_expr_pop (ctx); + ctx->location = DWARF_VALUE_MEMORY; } goto no_push; + case DW_OP_bit_piece: + { + ULONGEST size, offset; + + /* Record the piece. 
*/ + op_ptr = read_uleb128 (op_ptr, op_end, &size); + op_ptr = read_uleb128 (op_ptr, op_end, &offset); + add_piece (ctx, size, offset); + + /* Pop off the address/regnum, and reset the location + type. */ + if (ctx->location != DWARF_VALUE_LITERAL + && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) + dwarf_expr_pop (ctx); + ctx->location = DWARF_VALUE_MEMORY; + } + goto no_push; + + case DW_OP_GNU_uninit: + if (op_ptr != op_end) + error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always " + "be the very last op.")); + + ctx->initialized = 0; + goto no_push; + + case DW_OP_call2: + result = extract_unsigned_integer (op_ptr, 2, byte_order); + op_ptr += 2; + ctx->dwarf_call (ctx, result); + goto no_push; + + case DW_OP_call4: + result = extract_unsigned_integer (op_ptr, 4, byte_order); + op_ptr += 4; + ctx->dwarf_call (ctx, result); + goto no_push; + default: error (_("Unhandled dwarf expression opcode 0x%x"), op); } /* Most things push a result value. */ - dwarf_expr_push (ctx, result); + dwarf_expr_push (ctx, result, in_stack_memory); no_push:; } + + ctx->recursion_depth--; + gdb_assert (ctx->recursion_depth >= 0); +#undef sign_ext }
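
Below is a minimal standalone sketch, not part of the patch above, illustrating the arithmetic convention the new evaluator code adopts: dwarf_expr_push masks every pushed value down to the DWARF address size, and execute_stack_op's sign_ext macro reinterprets such a masked value as signed for operators like DW_OP_lt and DW_OP_shra. The plain typedefs, the hard-coded 4-byte address size, and the use of 8 in place of HOST_CHAR_BIT are assumptions made only so the sketch compiles on its own.

    /* Standalone sketch (hypothetical, for illustration only): mimics the
       masking done in dwarf_expr_push and the sign_bit/sign_ext scheme used
       in execute_stack_op when the DWARF address size is smaller than
       ULONGEST.  */

    #include <stdio.h>

    typedef unsigned long long ULONGEST;
    typedef long long LONGEST;

    int
    main (void)
    {
      int addr_size = 4;            /* Assume 32-bit DWARF addresses.  */

      /* Masking as in dwarf_expr_push: keep the value within the range
         defined by the DWARF address size.  */
      ULONGEST value = (ULONGEST) -1;   /* e.g. a pushed constant of -1 */
      if (addr_size < (int) sizeof (ULONGEST))
        value &= ((ULONGEST) 1 << (addr_size * 8)) - 1;   /* 0xffffffff */

      /* sign_bit/sign_ext as in execute_stack_op, used by the signed
         operators (DW_OP_lt, DW_OP_shra, DW_OP_div, ...).  */
      ULONGEST sign_bit = (addr_size >= (int) sizeof (ULONGEST) ? 0
                           : ((ULONGEST) 1) << (addr_size * 8 - 1));
    #define sign_ext(x) ((LONGEST) (((x) ^ sign_bit) - sign_bit))

      printf ("masked value: 0x%llx\n", value);             /* 0xffffffff */
      printf ("sign_ext:     %lld\n", sign_ext (value));    /* -1 */
      printf ("DW_OP_lt 0:   %d\n", sign_ext (value) < 0);  /* 1 */

      return 0;
    }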