1 /* DWARF 2 Expression Evaluator.
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
5 Contributed by Daniel Berlin (dan@dberlin.org)
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
28 #include "dwarf2expr.h"
30 /* Local prototypes. */
32 static void execute_stack_op (struct dwarf_expr_context
*,
33 const gdb_byte
*, const gdb_byte
*);
35 /* Cookie for gdbarch data. */
37 static struct gdbarch_data
*dwarf_arch_cookie
;
39 /* This holds gdbarch-specific types used by the DWARF expression
40 evaluator. See comments in execute_stack_op. */
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  The three slots
   cache the signed address types for 2-, 4- and 8-byte addresses;
   they are created lazily by dwarf_expr_address_type.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};
47 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50 dwarf_gdbarch_types_init (struct gdbarch
*gdbarch
)
52 struct dwarf_gdbarch_types
*types
53 = GDBARCH_OBSTACK_ZALLOC (gdbarch
, struct dwarf_gdbarch_types
);
55 /* The types themselves are lazily initialized. */
60 /* Return the type used for DWARF operations where the type is
61 unspecified in the DWARF spec. Only certain sizes are
65 dwarf_expr_address_type (struct dwarf_expr_context
*ctx
)
67 struct dwarf_gdbarch_types
*types
68 = (struct dwarf_gdbarch_types
*) gdbarch_data (ctx
->gdbarch
,
72 if (ctx
->addr_size
== 2)
74 else if (ctx
->addr_size
== 4)
76 else if (ctx
->addr_size
== 8)
79 error (_("Unsupported address size in DWARF expressions: %d bits"),
82 if (types
->dw_types
[ndx
] == NULL
)
84 = arch_integer_type (ctx
->gdbarch
,
86 0, "<signed DWARF address type>");
88 return types
->dw_types
[ndx
];
91 /* Create a new context for the expression evaluator. */
93 struct dwarf_expr_context
*
94 new_dwarf_expr_context (void)
96 struct dwarf_expr_context
*retval
;
98 retval
= XCNEW (struct dwarf_expr_context
);
99 retval
->stack_len
= 0;
100 retval
->stack_allocated
= 10;
101 retval
->stack
= XNEWVEC (struct dwarf_stack_value
, retval
->stack_allocated
);
102 retval
->num_pieces
= 0;
104 retval
->max_recursion_depth
= 0x100;
108 /* Release the memory allocated to CTX. */
111 free_dwarf_expr_context (struct dwarf_expr_context
*ctx
)
/* Helper for make_cleanup_free_dwarf_expr_context: adapt
   free_dwarf_expr_context to the void* cleanup signature.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context ((struct dwarf_expr_context *) arg);
}
126 /* Return a cleanup that calls free_dwarf_expr_context. */
129 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context
*ctx
)
131 return make_cleanup (free_dwarf_expr_context_cleanup
, ctx
);
134 /* Expand the memory allocated to CTX's stack to contain at least
135 NEED more elements than are currently used. */
138 dwarf_expr_grow_stack (struct dwarf_expr_context
*ctx
, size_t need
)
140 if (ctx
->stack_len
+ need
> ctx
->stack_allocated
)
142 size_t newlen
= ctx
->stack_len
+ need
+ 10;
144 ctx
->stack
= XRESIZEVEC (struct dwarf_stack_value
, ctx
->stack
, newlen
);
145 ctx
->stack_allocated
= newlen
;
149 /* Push VALUE onto CTX's stack. */
152 dwarf_expr_push (struct dwarf_expr_context
*ctx
, struct value
*value
,
155 struct dwarf_stack_value
*v
;
157 dwarf_expr_grow_stack (ctx
, 1);
158 v
= &ctx
->stack
[ctx
->stack_len
++];
160 v
->in_stack_memory
= in_stack_memory
;
163 /* Push VALUE onto CTX's stack. */
166 dwarf_expr_push_address (struct dwarf_expr_context
*ctx
, CORE_ADDR value
,
169 dwarf_expr_push (ctx
,
170 value_from_ulongest (dwarf_expr_address_type (ctx
), value
),
174 /* Pop the top item off of CTX's stack. */
177 dwarf_expr_pop (struct dwarf_expr_context
*ctx
)
179 if (ctx
->stack_len
<= 0)
180 error (_("dwarf expression stack underflow"));
184 /* Retrieve the N'th item on CTX's stack. */
187 dwarf_expr_fetch (struct dwarf_expr_context
*ctx
, int n
)
189 if (ctx
->stack_len
<= n
)
190 error (_("Asked for position %d of stack, "
191 "stack only has %d elements on it."),
193 return ctx
->stack
[ctx
->stack_len
- (1 + n
)].value
;
196 /* Require that TYPE be an integral type; throw an exception if not. */
199 dwarf_require_integral (struct type
*type
)
201 if (TYPE_CODE (type
) != TYPE_CODE_INT
202 && TYPE_CODE (type
) != TYPE_CODE_CHAR
203 && TYPE_CODE (type
) != TYPE_CODE_BOOL
)
204 error (_("integral type expected in DWARF expression"));
207 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 get_unsigned_type (struct gdbarch
*gdbarch
, struct type
*type
)
213 switch (TYPE_LENGTH (type
))
216 return builtin_type (gdbarch
)->builtin_uint8
;
218 return builtin_type (gdbarch
)->builtin_uint16
;
220 return builtin_type (gdbarch
)->builtin_uint32
;
222 return builtin_type (gdbarch
)->builtin_uint64
;
224 error (_("no unsigned variant found for type, while evaluating "
225 "DWARF expression"));
229 /* Return the signed form of TYPE. TYPE is necessarily an integral
233 get_signed_type (struct gdbarch
*gdbarch
, struct type
*type
)
235 switch (TYPE_LENGTH (type
))
238 return builtin_type (gdbarch
)->builtin_int8
;
240 return builtin_type (gdbarch
)->builtin_int16
;
242 return builtin_type (gdbarch
)->builtin_int32
;
244 return builtin_type (gdbarch
)->builtin_int64
;
246 error (_("no signed variant found for type, while evaluating "
247 "DWARF expression"));
251 /* Retrieve the N'th item on CTX's stack, converted to an address. */
254 dwarf_expr_fetch_address (struct dwarf_expr_context
*ctx
, int n
)
256 struct value
*result_val
= dwarf_expr_fetch (ctx
, n
);
257 enum bfd_endian byte_order
= gdbarch_byte_order (ctx
->gdbarch
);
260 dwarf_require_integral (value_type (result_val
));
261 result
= extract_unsigned_integer (value_contents (result_val
),
262 TYPE_LENGTH (value_type (result_val
)),
265 /* For most architectures, calling extract_unsigned_integer() alone
266 is sufficient for extracting an address. However, some
267 architectures (e.g. MIPS) use signed addresses and using
268 extract_unsigned_integer() will not produce a correct
269 result. Make sure we invoke gdbarch_integer_to_address()
270 for those architectures which require it. */
271 if (gdbarch_integer_to_address_p (ctx
->gdbarch
))
273 gdb_byte
*buf
= (gdb_byte
*) alloca (ctx
->addr_size
);
274 struct type
*int_type
= get_unsigned_type (ctx
->gdbarch
,
275 value_type (result_val
));
277 store_unsigned_integer (buf
, ctx
->addr_size
, byte_order
, result
);
278 return gdbarch_integer_to_address (ctx
->gdbarch
, int_type
, buf
);
281 return (CORE_ADDR
) result
;
284 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
287 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context
*ctx
, int n
)
289 if (ctx
->stack_len
<= n
)
290 error (_("Asked for position %d of stack, "
291 "stack only has %d elements on it."),
293 return ctx
->stack
[ctx
->stack_len
- (1 + n
)].in_stack_memory
;
296 /* Return true if the expression stack is empty. */
299 dwarf_expr_stack_empty_p (struct dwarf_expr_context
*ctx
)
301 return ctx
->stack_len
== 0;
304 /* Add a new piece to CTX's piece list. */
306 add_piece (struct dwarf_expr_context
*ctx
, ULONGEST size
, ULONGEST offset
)
308 struct dwarf_expr_piece
*p
;
313 = XRESIZEVEC (struct dwarf_expr_piece
, ctx
->pieces
, ctx
->num_pieces
);
315 p
= &ctx
->pieces
[ctx
->num_pieces
- 1];
316 p
->location
= ctx
->location
;
320 if (p
->location
== DWARF_VALUE_LITERAL
)
322 p
->v
.literal
.data
= ctx
->data
;
323 p
->v
.literal
.length
= ctx
->len
;
325 else if (dwarf_expr_stack_empty_p (ctx
))
327 p
->location
= DWARF_VALUE_OPTIMIZED_OUT
;
328 /* Also reset the context's location, for our callers. This is
329 a somewhat strange approach, but this lets us avoid setting
330 the location to DWARF_VALUE_MEMORY in all the individual
331 cases in the evaluator. */
332 ctx
->location
= DWARF_VALUE_OPTIMIZED_OUT
;
334 else if (p
->location
== DWARF_VALUE_MEMORY
)
336 p
->v
.mem
.addr
= dwarf_expr_fetch_address (ctx
, 0);
337 p
->v
.mem
.in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 0);
339 else if (p
->location
== DWARF_VALUE_IMPLICIT_POINTER
)
341 p
->v
.ptr
.die
.sect_off
= ctx
->len
;
342 p
->v
.ptr
.offset
= value_as_long (dwarf_expr_fetch (ctx
, 0));
344 else if (p
->location
== DWARF_VALUE_REGISTER
)
345 p
->v
.regno
= value_as_long (dwarf_expr_fetch (ctx
, 0));
348 p
->v
.value
= dwarf_expr_fetch (ctx
, 0);
352 /* Evaluate the expression at ADDR (LEN bytes long) using the context
356 dwarf_expr_eval (struct dwarf_expr_context
*ctx
, const gdb_byte
*addr
,
359 int old_recursion_depth
= ctx
->recursion_depth
;
361 execute_stack_op (ctx
, addr
, addr
+ len
);
363 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
365 gdb_assert (ctx
->recursion_depth
== old_recursion_depth
);
368 /* Helper to read a uleb128 value or throw an error. */
371 safe_read_uleb128 (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
374 buf
= gdb_read_uleb128 (buf
, buf_end
, r
);
376 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
380 /* Helper to read a sleb128 value or throw an error. */
383 safe_read_sleb128 (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
386 buf
= gdb_read_sleb128 (buf
, buf_end
, r
);
388 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
393 safe_skip_leb128 (const gdb_byte
*buf
, const gdb_byte
*buf_end
)
395 buf
= gdb_skip_leb128 (buf
, buf_end
);
397 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
402 /* Check that the current operator is either at the end of an
403 expression, or that it is followed by a composition operator. */
406 dwarf_expr_require_composition (const gdb_byte
*op_ptr
, const gdb_byte
*op_end
,
409 /* It seems like DW_OP_GNU_uninit should be handled here. However,
410 it doesn't seem to make sense for DW_OP_*_value, and it was not
411 checked at the other place that this function is called. */
412 if (op_ptr
!= op_end
&& *op_ptr
!= DW_OP_piece
&& *op_ptr
!= DW_OP_bit_piece
)
413 error (_("DWARF-2 expression error: `%s' operations must be "
414 "used either alone or in conjunction with DW_OP_piece "
415 "or DW_OP_bit_piece."),
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness, and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}
433 /* A convenience function to call get_base_type on CTX and return the
434 result. DIE is the DIE whose type we need. SIZE is non-zero if
435 this function should verify that the resulting type has the correct
439 dwarf_get_base_type (struct dwarf_expr_context
*ctx
, cu_offset die
, int size
)
443 if (ctx
->funcs
->get_base_type
)
445 result
= ctx
->funcs
->get_base_type (ctx
, die
);
447 error (_("Could not find type for DW_OP_GNU_const_type"));
448 if (size
!= 0 && TYPE_LENGTH (result
) != size
)
449 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
452 /* Anything will do. */
453 result
= builtin_type (ctx
->gdbarch
)->builtin_int
;
458 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
459 DWARF register number. Otherwise return -1. */
462 dwarf_block_to_dwarf_reg (const gdb_byte
*buf
, const gdb_byte
*buf_end
)
468 if (*buf
>= DW_OP_reg0
&& *buf
<= DW_OP_reg31
)
470 if (buf_end
- buf
!= 1)
472 return *buf
- DW_OP_reg0
;
475 if (*buf
== DW_OP_GNU_regval_type
)
478 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
481 buf
= gdb_skip_leb128 (buf
, buf_end
);
485 else if (*buf
== DW_OP_regx
)
488 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
494 if (buf
!= buf_end
|| (int) dwarf_reg
!= dwarf_reg
)
499 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
500 DW_OP_deref* return the DWARF register number. Otherwise return -1.
501 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
502 size from DW_OP_deref_size. */
505 dwarf_block_to_dwarf_reg_deref (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
506 CORE_ADDR
*deref_size_return
)
514 if (*buf
>= DW_OP_breg0
&& *buf
<= DW_OP_breg31
)
516 dwarf_reg
= *buf
- DW_OP_breg0
;
521 else if (*buf
== DW_OP_bregx
)
524 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
527 if ((int) dwarf_reg
!= dwarf_reg
)
533 buf
= gdb_read_sleb128 (buf
, buf_end
, &offset
);
539 if (*buf
== DW_OP_deref
)
542 *deref_size_return
= -1;
544 else if (*buf
== DW_OP_deref_size
)
549 *deref_size_return
= *buf
++;
560 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
561 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
564 dwarf_block_to_fb_offset (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
565 CORE_ADDR
*fb_offset_return
)
572 if (*buf
!= DW_OP_fbreg
)
576 buf
= gdb_read_sleb128 (buf
, buf_end
, &fb_offset
);
579 *fb_offset_return
= fb_offset
;
580 if (buf
!= buf_end
|| fb_offset
!= (LONGEST
) *fb_offset_return
)
586 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
587 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
588 The matched SP register number depends on GDBARCH. */
591 dwarf_block_to_sp_offset (struct gdbarch
*gdbarch
, const gdb_byte
*buf
,
592 const gdb_byte
*buf_end
, CORE_ADDR
*sp_offset_return
)
599 if (*buf
>= DW_OP_breg0
&& *buf
<= DW_OP_breg31
)
601 dwarf_reg
= *buf
- DW_OP_breg0
;
606 if (*buf
!= DW_OP_bregx
)
609 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
614 if (gdbarch_dwarf2_reg_to_regnum (gdbarch
, dwarf_reg
)
615 != gdbarch_sp_regnum (gdbarch
))
618 buf
= gdb_read_sleb128 (buf
, buf_end
, &sp_offset
);
621 *sp_offset_return
= sp_offset
;
622 if (buf
!= buf_end
|| sp_offset
!= (LONGEST
) *sp_offset_return
)
628 /* The engine for the expression evaluator. Using the context in CTX,
629 evaluate the expression between OP_PTR and OP_END. */
632 execute_stack_op (struct dwarf_expr_context
*ctx
,
633 const gdb_byte
*op_ptr
, const gdb_byte
*op_end
)
635 enum bfd_endian byte_order
= gdbarch_byte_order (ctx
->gdbarch
);
636 /* Old-style "untyped" DWARF values need special treatment in a
637 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
638 a special type for these values so we can distinguish them from
639 values that have an explicit type, because explicitly-typed
640 values do not need special treatment. This special type must be
641 different (in the `==' sense) from any base type coming from the
643 struct type
*address_type
= dwarf_expr_address_type (ctx
);
645 ctx
->location
= DWARF_VALUE_MEMORY
;
646 ctx
->initialized
= 1; /* Default is initialized. */
648 if (ctx
->recursion_depth
> ctx
->max_recursion_depth
)
649 error (_("DWARF-2 expression error: Loop detected (%d)."),
650 ctx
->recursion_depth
);
651 ctx
->recursion_depth
++;
653 while (op_ptr
< op_end
)
655 enum dwarf_location_atom op
= (enum dwarf_location_atom
) *op_ptr
++;
657 /* Assume the value is not in stack memory.
658 Code that knows otherwise sets this to 1.
659 Some arithmetic on stack addresses can probably be assumed to still
660 be a stack address, but we skip this complication for now.
661 This is just an optimization, so it's always ok to punt
662 and leave this as 0. */
663 int in_stack_memory
= 0;
664 uint64_t uoffset
, reg
;
666 struct value
*result_val
= NULL
;
668 /* The DWARF expression might have a bug causing an infinite
669 loop. In that case, quitting is the only way out. */
706 result
= op
- DW_OP_lit0
;
707 result_val
= value_from_ulongest (address_type
, result
);
711 result
= extract_unsigned_integer (op_ptr
,
712 ctx
->addr_size
, byte_order
);
713 op_ptr
+= ctx
->addr_size
;
714 /* Some versions of GCC emit DW_OP_addr before
715 DW_OP_GNU_push_tls_address. In this case the value is an
716 index, not an address. We don't support things like
717 branching between the address and the TLS op. */
718 if (op_ptr
>= op_end
|| *op_ptr
!= DW_OP_GNU_push_tls_address
)
719 result
+= ctx
->offset
;
720 result_val
= value_from_ulongest (address_type
, result
);
723 case DW_OP_GNU_addr_index
:
724 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
725 result
= (ctx
->funcs
->get_addr_index
) (ctx
->baton
, uoffset
);
726 result
+= ctx
->offset
;
727 result_val
= value_from_ulongest (address_type
, result
);
729 case DW_OP_GNU_const_index
:
730 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
731 result
= (ctx
->funcs
->get_addr_index
) (ctx
->baton
, uoffset
);
732 result_val
= value_from_ulongest (address_type
, result
);
736 result
= extract_unsigned_integer (op_ptr
, 1, byte_order
);
737 result_val
= value_from_ulongest (address_type
, result
);
741 result
= extract_signed_integer (op_ptr
, 1, byte_order
);
742 result_val
= value_from_ulongest (address_type
, result
);
746 result
= extract_unsigned_integer (op_ptr
, 2, byte_order
);
747 result_val
= value_from_ulongest (address_type
, result
);
751 result
= extract_signed_integer (op_ptr
, 2, byte_order
);
752 result_val
= value_from_ulongest (address_type
, result
);
756 result
= extract_unsigned_integer (op_ptr
, 4, byte_order
);
757 result_val
= value_from_ulongest (address_type
, result
);
761 result
= extract_signed_integer (op_ptr
, 4, byte_order
);
762 result_val
= value_from_ulongest (address_type
, result
);
766 result
= extract_unsigned_integer (op_ptr
, 8, byte_order
);
767 result_val
= value_from_ulongest (address_type
, result
);
771 result
= extract_signed_integer (op_ptr
, 8, byte_order
);
772 result_val
= value_from_ulongest (address_type
, result
);
776 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
778 result_val
= value_from_ulongest (address_type
, result
);
781 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
783 result_val
= value_from_ulongest (address_type
, result
);
786 /* The DW_OP_reg operations are required to occur alone in
787 location expressions. */
821 && *op_ptr
!= DW_OP_piece
822 && *op_ptr
!= DW_OP_bit_piece
823 && *op_ptr
!= DW_OP_GNU_uninit
)
824 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
825 "used either alone or in conjunction with DW_OP_piece "
826 "or DW_OP_bit_piece."));
828 result
= op
- DW_OP_reg0
;
829 result_val
= value_from_ulongest (address_type
, result
);
830 ctx
->location
= DWARF_VALUE_REGISTER
;
834 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
835 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_regx");
838 result_val
= value_from_ulongest (address_type
, result
);
839 ctx
->location
= DWARF_VALUE_REGISTER
;
842 case DW_OP_implicit_value
:
846 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &len
);
847 if (op_ptr
+ len
> op_end
)
848 error (_("DW_OP_implicit_value: too few bytes available."));
851 ctx
->location
= DWARF_VALUE_LITERAL
;
853 dwarf_expr_require_composition (op_ptr
, op_end
,
854 "DW_OP_implicit_value");
858 case DW_OP_stack_value
:
859 ctx
->location
= DWARF_VALUE_STACK
;
860 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_stack_value");
863 case DW_OP_GNU_implicit_pointer
:
867 if (ctx
->ref_addr_size
== -1)
868 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
869 "is not allowed in frame context"));
871 /* The referred-to DIE of sect_offset kind. */
872 ctx
->len
= extract_unsigned_integer (op_ptr
, ctx
->ref_addr_size
,
874 op_ptr
+= ctx
->ref_addr_size
;
876 /* The byte offset into the data. */
877 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &len
);
878 result
= (ULONGEST
) len
;
879 result_val
= value_from_ulongest (address_type
, result
);
881 ctx
->location
= DWARF_VALUE_IMPLICIT_POINTER
;
882 dwarf_expr_require_composition (op_ptr
, op_end
,
883 "DW_OP_GNU_implicit_pointer");
920 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
921 result
= (ctx
->funcs
->read_addr_from_reg
) (ctx
->baton
,
924 result_val
= value_from_ulongest (address_type
, result
);
929 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
930 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
931 result
= (ctx
->funcs
->read_addr_from_reg
) (ctx
->baton
, reg
);
933 result_val
= value_from_ulongest (address_type
, result
);
938 const gdb_byte
*datastart
;
940 unsigned int before_stack_len
;
942 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
943 /* Rather than create a whole new context, we simply
944 record the stack length before execution, then reset it
945 afterwards, effectively erasing whatever the recursive
947 before_stack_len
= ctx
->stack_len
;
948 /* FIXME: cagney/2003-03-26: This code should be using
949 get_frame_base_address(), and then implement a dwarf2
950 specific this_base method. */
951 (ctx
->funcs
->get_frame_base
) (ctx
->baton
, &datastart
, &datalen
);
952 dwarf_expr_eval (ctx
, datastart
, datalen
);
953 if (ctx
->location
== DWARF_VALUE_MEMORY
)
954 result
= dwarf_expr_fetch_address (ctx
, 0);
955 else if (ctx
->location
== DWARF_VALUE_REGISTER
)
956 result
= (ctx
->funcs
->read_addr_from_reg
)
958 value_as_long (dwarf_expr_fetch (ctx
, 0)));
960 error (_("Not implemented: computing frame "
961 "base using explicit value operator"));
962 result
= result
+ offset
;
963 result_val
= value_from_ulongest (address_type
, result
);
965 ctx
->stack_len
= before_stack_len
;
966 ctx
->location
= DWARF_VALUE_MEMORY
;
971 result_val
= dwarf_expr_fetch (ctx
, 0);
972 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 0);
976 dwarf_expr_pop (ctx
);
981 result_val
= dwarf_expr_fetch (ctx
, offset
);
982 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, offset
);
987 struct dwarf_stack_value t1
, t2
;
989 if (ctx
->stack_len
< 2)
990 error (_("Not enough elements for "
991 "DW_OP_swap. Need 2, have %d."),
993 t1
= ctx
->stack
[ctx
->stack_len
- 1];
994 t2
= ctx
->stack
[ctx
->stack_len
- 2];
995 ctx
->stack
[ctx
->stack_len
- 1] = t2
;
996 ctx
->stack
[ctx
->stack_len
- 2] = t1
;
1001 result_val
= dwarf_expr_fetch (ctx
, 1);
1002 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 1);
1007 struct dwarf_stack_value t1
, t2
, t3
;
1009 if (ctx
->stack_len
< 3)
1010 error (_("Not enough elements for "
1011 "DW_OP_rot. Need 3, have %d."),
1013 t1
= ctx
->stack
[ctx
->stack_len
- 1];
1014 t2
= ctx
->stack
[ctx
->stack_len
- 2];
1015 t3
= ctx
->stack
[ctx
->stack_len
- 3];
1016 ctx
->stack
[ctx
->stack_len
- 1] = t2
;
1017 ctx
->stack
[ctx
->stack_len
- 2] = t3
;
1018 ctx
->stack
[ctx
->stack_len
- 3] = t1
;
1023 case DW_OP_deref_size
:
1024 case DW_OP_GNU_deref_type
:
1026 int addr_size
= (op
== DW_OP_deref
? ctx
->addr_size
: *op_ptr
++);
1027 gdb_byte
*buf
= (gdb_byte
*) alloca (addr_size
);
1028 CORE_ADDR addr
= dwarf_expr_fetch_address (ctx
, 0);
1031 dwarf_expr_pop (ctx
);
1033 if (op
== DW_OP_GNU_deref_type
)
1037 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1038 type_die
.cu_off
= uoffset
;
1039 type
= dwarf_get_base_type (ctx
, type_die
, 0);
1042 type
= address_type
;
1044 (ctx
->funcs
->read_mem
) (ctx
->baton
, buf
, addr
, addr_size
);
1046 /* If the size of the object read from memory is different
1047 from the type length, we need to zero-extend it. */
1048 if (TYPE_LENGTH (type
) != addr_size
)
1051 extract_unsigned_integer (buf
, addr_size
, byte_order
);
1053 buf
= (gdb_byte
*) alloca (TYPE_LENGTH (type
));
1054 store_unsigned_integer (buf
, TYPE_LENGTH (type
),
1055 byte_order
, result
);
1058 result_val
= value_from_contents_and_address (type
, buf
, addr
);
1065 case DW_OP_plus_uconst
:
1067 /* Unary operations. */
1068 result_val
= dwarf_expr_fetch (ctx
, 0);
1069 dwarf_expr_pop (ctx
);
1074 if (value_less (result_val
,
1075 value_zero (value_type (result_val
), not_lval
)))
1076 result_val
= value_neg (result_val
);
1079 result_val
= value_neg (result_val
);
1082 dwarf_require_integral (value_type (result_val
));
1083 result_val
= value_complement (result_val
);
1085 case DW_OP_plus_uconst
:
1086 dwarf_require_integral (value_type (result_val
));
1087 result
= value_as_long (result_val
);
1088 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
1090 result_val
= value_from_ulongest (address_type
, result
);
1114 /* Binary operations. */
1115 struct value
*first
, *second
;
1117 second
= dwarf_expr_fetch (ctx
, 0);
1118 dwarf_expr_pop (ctx
);
1120 first
= dwarf_expr_fetch (ctx
, 0);
1121 dwarf_expr_pop (ctx
);
1123 if (! base_types_equal_p (value_type (first
), value_type (second
)))
1124 error (_("Incompatible types on DWARF stack"));
1129 dwarf_require_integral (value_type (first
));
1130 dwarf_require_integral (value_type (second
));
1131 result_val
= value_binop (first
, second
, BINOP_BITWISE_AND
);
1134 result_val
= value_binop (first
, second
, BINOP_DIV
);
1137 result_val
= value_binop (first
, second
, BINOP_SUB
);
1142 struct type
*orig_type
= value_type (first
);
1144 /* We have to special-case "old-style" untyped values
1145 -- these must have mod computed using unsigned
1147 if (orig_type
== address_type
)
1150 = get_unsigned_type (ctx
->gdbarch
, orig_type
);
1153 first
= value_cast (utype
, first
);
1154 second
= value_cast (utype
, second
);
1156 /* Note that value_binop doesn't handle float or
1157 decimal float here. This seems unimportant. */
1158 result_val
= value_binop (first
, second
, BINOP_MOD
);
1160 result_val
= value_cast (orig_type
, result_val
);
1164 result_val
= value_binop (first
, second
, BINOP_MUL
);
1167 dwarf_require_integral (value_type (first
));
1168 dwarf_require_integral (value_type (second
));
1169 result_val
= value_binop (first
, second
, BINOP_BITWISE_IOR
);
1172 result_val
= value_binop (first
, second
, BINOP_ADD
);
1175 dwarf_require_integral (value_type (first
));
1176 dwarf_require_integral (value_type (second
));
1177 result_val
= value_binop (first
, second
, BINOP_LSH
);
1180 dwarf_require_integral (value_type (first
));
1181 dwarf_require_integral (value_type (second
));
1182 if (!TYPE_UNSIGNED (value_type (first
)))
1185 = get_unsigned_type (ctx
->gdbarch
, value_type (first
));
1187 first
= value_cast (utype
, first
);
1190 result_val
= value_binop (first
, second
, BINOP_RSH
);
1191 /* Make sure we wind up with the same type we started
1193 if (value_type (result_val
) != value_type (second
))
1194 result_val
= value_cast (value_type (second
), result_val
);
1197 dwarf_require_integral (value_type (first
));
1198 dwarf_require_integral (value_type (second
));
1199 if (TYPE_UNSIGNED (value_type (first
)))
1202 = get_signed_type (ctx
->gdbarch
, value_type (first
));
1204 first
= value_cast (stype
, first
);
1207 result_val
= value_binop (first
, second
, BINOP_RSH
);
1208 /* Make sure we wind up with the same type we started
1210 if (value_type (result_val
) != value_type (second
))
1211 result_val
= value_cast (value_type (second
), result_val
);
1214 dwarf_require_integral (value_type (first
));
1215 dwarf_require_integral (value_type (second
));
1216 result_val
= value_binop (first
, second
, BINOP_BITWISE_XOR
);
1219 /* A <= B is !(B < A). */
1220 result
= ! value_less (second
, first
);
1221 result_val
= value_from_ulongest (address_type
, result
);
1224 /* A >= B is !(A < B). */
1225 result
= ! value_less (first
, second
);
1226 result_val
= value_from_ulongest (address_type
, result
);
1229 result
= value_equal (first
, second
);
1230 result_val
= value_from_ulongest (address_type
, result
);
1233 result
= value_less (first
, second
);
1234 result_val
= value_from_ulongest (address_type
, result
);
1237 /* A > B is B < A. */
1238 result
= value_less (second
, first
);
1239 result_val
= value_from_ulongest (address_type
, result
);
1242 result
= ! value_equal (first
, second
);
1243 result_val
= value_from_ulongest (address_type
, result
);
1246 internal_error (__FILE__
, __LINE__
,
1247 _("Can't be reached."));
1252 case DW_OP_call_frame_cfa
:
1253 result
= (ctx
->funcs
->get_frame_cfa
) (ctx
->baton
);
1254 result_val
= value_from_ulongest (address_type
, result
);
1255 in_stack_memory
= 1;
1258 case DW_OP_GNU_push_tls_address
:
1259 /* Variable is at a constant offset in the thread-local
1260 storage block into the objfile for the current thread and
1261 the dynamic linker module containing this expression. Here
1262 we return returns the offset from that base. The top of the
1263 stack has the offset from the beginning of the thread
1264 control block at which the variable is located. Nothing
1265 should follow this operator, so the top of stack would be
1267 result
= value_as_long (dwarf_expr_fetch (ctx
, 0));
1268 dwarf_expr_pop (ctx
);
1269 result
= (ctx
->funcs
->get_tls_address
) (ctx
->baton
, result
);
1270 result_val
= value_from_ulongest (address_type
, result
);
1274 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
1283 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
1285 val
= dwarf_expr_fetch (ctx
, 0);
1286 dwarf_require_integral (value_type (val
));
1287 if (value_as_long (val
) != 0)
1289 dwarf_expr_pop (ctx
);
1300 /* Record the piece. */
1301 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &size
);
1302 add_piece (ctx
, 8 * size
, 0);
1304 /* Pop off the address/regnum, and reset the location
1306 if (ctx
->location
!= DWARF_VALUE_LITERAL
1307 && ctx
->location
!= DWARF_VALUE_OPTIMIZED_OUT
)
1308 dwarf_expr_pop (ctx
);
1309 ctx
->location
= DWARF_VALUE_MEMORY
;
1313 case DW_OP_bit_piece
:
1315 uint64_t size
, offset
;
1317 /* Record the piece. */
1318 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &size
);
1319 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &offset
);
1320 add_piece (ctx
, size
, offset
);
1322 /* Pop off the address/regnum, and reset the location
1324 if (ctx
->location
!= DWARF_VALUE_LITERAL
1325 && ctx
->location
!= DWARF_VALUE_OPTIMIZED_OUT
)
1326 dwarf_expr_pop (ctx
);
1327 ctx
->location
= DWARF_VALUE_MEMORY
;
1331 case DW_OP_GNU_uninit
:
1332 if (op_ptr
!= op_end
)
1333 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1334 "be the very last op."));
1336 ctx
->initialized
= 0;
1343 offset
.cu_off
= extract_unsigned_integer (op_ptr
, 2, byte_order
);
1345 ctx
->funcs
->dwarf_call (ctx
, offset
);
1353 offset
.cu_off
= extract_unsigned_integer (op_ptr
, 4, byte_order
);
1355 ctx
->funcs
->dwarf_call (ctx
, offset
);
1359 case DW_OP_GNU_entry_value
:
1362 CORE_ADDR deref_size
;
1363 union call_site_parameter_u kind_u
;
1365 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &len
);
1366 if (op_ptr
+ len
> op_end
)
1367 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1369 kind_u
.dwarf_reg
= dwarf_block_to_dwarf_reg (op_ptr
, op_ptr
+ len
);
1370 if (kind_u
.dwarf_reg
!= -1)
1373 ctx
->funcs
->push_dwarf_reg_entry_value (ctx
,
1374 CALL_SITE_PARAMETER_DWARF_REG
,
1376 -1 /* deref_size */);
1380 kind_u
.dwarf_reg
= dwarf_block_to_dwarf_reg_deref (op_ptr
,
1383 if (kind_u
.dwarf_reg
!= -1)
1385 if (deref_size
== -1)
1386 deref_size
= ctx
->addr_size
;
1388 ctx
->funcs
->push_dwarf_reg_entry_value (ctx
,
1389 CALL_SITE_PARAMETER_DWARF_REG
,
1390 kind_u
, deref_size
);
1394 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1395 "supported only for single DW_OP_reg* "
1396 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1399 case DW_OP_GNU_parameter_ref
:
1401 union call_site_parameter_u kind_u
;
1403 kind_u
.param_offset
.cu_off
= extract_unsigned_integer (op_ptr
, 4,
1406 ctx
->funcs
->push_dwarf_reg_entry_value (ctx
,
1407 CALL_SITE_PARAMETER_PARAM_OFFSET
,
1409 -1 /* deref_size */);
1413 case DW_OP_GNU_const_type
:
1417 const gdb_byte
*data
;
1420 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1421 type_die
.cu_off
= uoffset
;
1426 type
= dwarf_get_base_type (ctx
, type_die
, n
);
1427 result_val
= value_from_contents (type
, data
);
1431 case DW_OP_GNU_regval_type
:
1436 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
1437 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1438 type_die
.cu_off
= uoffset
;
1440 type
= dwarf_get_base_type (ctx
, type_die
, 0);
1441 result_val
= ctx
->funcs
->get_reg_value (ctx
->baton
, type
, reg
);
1445 case DW_OP_GNU_convert
:
1446 case DW_OP_GNU_reinterpret
:
1451 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1452 type_die
.cu_off
= uoffset
;
1454 if (type_die
.cu_off
== 0)
1455 type
= address_type
;
1457 type
= dwarf_get_base_type (ctx
, type_die
, 0);
1459 result_val
= dwarf_expr_fetch (ctx
, 0);
1460 dwarf_expr_pop (ctx
);
1462 if (op
== DW_OP_GNU_convert
)
1463 result_val
= value_cast (type
, result_val
);
1464 else if (type
== value_type (result_val
))
1468 else if (TYPE_LENGTH (type
)
1469 != TYPE_LENGTH (value_type (result_val
)))
1470 error (_("DW_OP_GNU_reinterpret has wrong size"));
1473 = value_from_contents (type
,
1474 value_contents_all (result_val
));
1478 case DW_OP_push_object_address
:
1479 /* Return the address of the object we are currently observing. */
1480 result
= (ctx
->funcs
->get_object_address
) (ctx
->baton
);
1481 result_val
= value_from_ulongest (address_type
, result
);
1485 error (_("Unhandled dwarf expression opcode 0x%x"), op
);
1488 /* Most things push a result value. */
1489 gdb_assert (result_val
!= NULL
);
1490 dwarf_expr_push (ctx
, result_val
, in_stack_memory
);
1495 /* To simplify our main caller, if the result is an implicit
1496 pointer, then make a pieced value. This is ok because we can't
1497 have implicit pointers in contexts where pieces are invalid. */
1498 if (ctx
->location
== DWARF_VALUE_IMPLICIT_POINTER
)
1499 add_piece (ctx
, 8 * ctx
->addr_size
, 0);
1502 ctx
->recursion_depth
--;
1503 gdb_assert (ctx
->recursion_depth
>= 0);
1506 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1509 ctx_no_get_frame_base (void *baton
, const gdb_byte
**start
, size_t *length
)
1511 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1514 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1517 ctx_no_get_frame_cfa (void *baton
)
1519 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1522 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1525 ctx_no_get_frame_pc (void *baton
)
1527 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1530 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1533 ctx_no_get_tls_address (void *baton
, CORE_ADDR offset
)
1535 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
1538 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1541 ctx_no_dwarf_call (struct dwarf_expr_context
*ctx
, cu_offset die_offset
)
1543 error (_("%s is invalid in this context"), "DW_OP_call*");
1546 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */
1549 ctx_no_get_base_type (struct dwarf_expr_context
*ctx
, cu_offset die
)
1551 error (_("Support for typed DWARF is not supported in this context"));
1554 /* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
1558 ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context
*ctx
,
1559 enum call_site_parameter_kind kind
,
1560 union call_site_parameter_u kind_u
,
1563 internal_error (__FILE__
, __LINE__
,
1564 _("Support for DW_OP_GNU_entry_value is unimplemented"));
1567 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */
1570 ctx_no_get_addr_index (void *baton
, unsigned int index
)
1572 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
1575 /* Provide a prototype to silence -Wmissing-prototypes. */
1576 extern initialize_file_ftype _initialize_dwarf2expr
;
1579 _initialize_dwarf2expr (void)
1582 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init
);