1 /* DWARF 2 Expression Evaluator.
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 Contributed by Daniel Berlin (dan@dberlin.org)
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
32 /* Local prototypes. */
34 static void execute_stack_op (struct dwarf_expr_context
*,
35 const gdb_byte
*, const gdb_byte
*);
37 /* Cookie for gdbarch data. */
39 static struct gdbarch_data
*dwarf_arch_cookie
;
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Signed "untyped DWARF address" types for 2-, 4- and 8-byte
     address sizes; created lazily by dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
52 dwarf_gdbarch_types_init (struct gdbarch
*gdbarch
)
54 struct dwarf_gdbarch_types
*types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch
, struct dwarf_gdbarch_types
);
57 /* The types themselves are lazily initialized. */
62 /* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
67 dwarf_expr_address_type (struct dwarf_expr_context
*ctx
)
69 struct dwarf_gdbarch_types
*types
= gdbarch_data (ctx
->gdbarch
,
73 if (ctx
->addr_size
== 2)
75 else if (ctx
->addr_size
== 4)
77 else if (ctx
->addr_size
== 8)
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
83 if (types
->dw_types
[ndx
] == NULL
)
85 = arch_integer_type (ctx
->gdbarch
,
87 0, "<signed DWARF address type>");
89 return types
->dw_types
[ndx
];
92 /* Create a new context for the expression evaluator. */
94 struct dwarf_expr_context
*
95 new_dwarf_expr_context (void)
97 struct dwarf_expr_context
*retval
;
99 retval
= xcalloc (1, sizeof (struct dwarf_expr_context
));
100 retval
->stack_len
= 0;
101 retval
->stack_allocated
= 10;
102 retval
->stack
= xmalloc (retval
->stack_allocated
103 * sizeof (struct dwarf_stack_value
));
104 retval
->num_pieces
= 0;
106 retval
->max_recursion_depth
= 0x100;
110 /* Release the memory allocated to CTX. */
113 free_dwarf_expr_context (struct dwarf_expr_context
*ctx
)
/* Helper for make_cleanup_free_dwarf_expr_context: adapt the cleanup
   calling convention (void *) to free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
128 /* Return a cleanup that calls free_dwarf_expr_context. */
131 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context
*ctx
)
133 return make_cleanup (free_dwarf_expr_context_cleanup
, ctx
);
136 /* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
140 dwarf_expr_grow_stack (struct dwarf_expr_context
*ctx
, size_t need
)
142 if (ctx
->stack_len
+ need
> ctx
->stack_allocated
)
144 size_t newlen
= ctx
->stack_len
+ need
+ 10;
146 ctx
->stack
= xrealloc (ctx
->stack
,
147 newlen
* sizeof (struct dwarf_stack_value
));
148 ctx
->stack_allocated
= newlen
;
152 /* Push VALUE onto CTX's stack. */
155 dwarf_expr_push (struct dwarf_expr_context
*ctx
, struct value
*value
,
158 struct dwarf_stack_value
*v
;
160 dwarf_expr_grow_stack (ctx
, 1);
161 v
= &ctx
->stack
[ctx
->stack_len
++];
163 v
->in_stack_memory
= in_stack_memory
;
166 /* Push VALUE onto CTX's stack. */
169 dwarf_expr_push_address (struct dwarf_expr_context
*ctx
, CORE_ADDR value
,
172 dwarf_expr_push (ctx
,
173 value_from_ulongest (dwarf_expr_address_type (ctx
), value
),
177 /* Pop the top item off of CTX's stack. */
180 dwarf_expr_pop (struct dwarf_expr_context
*ctx
)
182 if (ctx
->stack_len
<= 0)
183 error (_("dwarf expression stack underflow"));
187 /* Retrieve the N'th item on CTX's stack. */
190 dwarf_expr_fetch (struct dwarf_expr_context
*ctx
, int n
)
192 if (ctx
->stack_len
<= n
)
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
196 return ctx
->stack
[ctx
->stack_len
- (1 + n
)].value
;
199 /* Require that TYPE be an integral type; throw an exception if not. */
202 dwarf_require_integral (struct type
*type
)
204 if (TYPE_CODE (type
) != TYPE_CODE_INT
205 && TYPE_CODE (type
) != TYPE_CODE_CHAR
206 && TYPE_CODE (type
) != TYPE_CODE_BOOL
)
207 error (_("integral type expected in DWARF expression"));
210 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
214 get_unsigned_type (struct gdbarch
*gdbarch
, struct type
*type
)
216 switch (TYPE_LENGTH (type
))
219 return builtin_type (gdbarch
)->builtin_uint8
;
221 return builtin_type (gdbarch
)->builtin_uint16
;
223 return builtin_type (gdbarch
)->builtin_uint32
;
225 return builtin_type (gdbarch
)->builtin_uint64
;
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
232 /* Retrieve the N'th item on CTX's stack, converted to an address. */
235 dwarf_expr_fetch_address (struct dwarf_expr_context
*ctx
, int n
)
237 struct value
*result_val
= dwarf_expr_fetch (ctx
, n
);
238 enum bfd_endian byte_order
= gdbarch_byte_order (ctx
->gdbarch
);
241 dwarf_require_integral (value_type (result_val
));
242 result
= extract_unsigned_integer (value_contents (result_val
),
243 TYPE_LENGTH (value_type (result_val
)),
246 /* For most architectures, calling extract_unsigned_integer() alone
247 is sufficient for extracting an address. However, some
248 architectures (e.g. MIPS) use signed addresses and using
249 extract_unsigned_integer() will not produce a correct
250 result. Make sure we invoke gdbarch_integer_to_address()
251 for those architectures which require it. */
252 if (gdbarch_integer_to_address_p (ctx
->gdbarch
))
254 gdb_byte
*buf
= alloca (ctx
->addr_size
);
255 struct type
*int_type
= get_unsigned_type (ctx
->gdbarch
,
256 value_type (result_val
));
258 store_unsigned_integer (buf
, ctx
->addr_size
, byte_order
, result
);
259 return gdbarch_integer_to_address (ctx
->gdbarch
, int_type
, buf
);
262 return (CORE_ADDR
) result
;
265 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
268 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context
*ctx
, int n
)
270 if (ctx
->stack_len
<= n
)
271 error (_("Asked for position %d of stack, "
272 "stack only has %d elements on it."),
274 return ctx
->stack
[ctx
->stack_len
- (1 + n
)].in_stack_memory
;
277 /* Return true if the expression stack is empty. */
280 dwarf_expr_stack_empty_p (struct dwarf_expr_context
*ctx
)
282 return ctx
->stack_len
== 0;
285 /* Add a new piece to CTX's piece list. */
287 add_piece (struct dwarf_expr_context
*ctx
, ULONGEST size
, ULONGEST offset
)
289 struct dwarf_expr_piece
*p
;
293 ctx
->pieces
= xrealloc (ctx
->pieces
,
295 * sizeof (struct dwarf_expr_piece
)));
297 p
= &ctx
->pieces
[ctx
->num_pieces
- 1];
298 p
->location
= ctx
->location
;
302 if (p
->location
== DWARF_VALUE_LITERAL
)
304 p
->v
.literal
.data
= ctx
->data
;
305 p
->v
.literal
.length
= ctx
->len
;
307 else if (dwarf_expr_stack_empty_p (ctx
))
309 p
->location
= DWARF_VALUE_OPTIMIZED_OUT
;
310 /* Also reset the context's location, for our callers. This is
311 a somewhat strange approach, but this lets us avoid setting
312 the location to DWARF_VALUE_MEMORY in all the individual
313 cases in the evaluator. */
314 ctx
->location
= DWARF_VALUE_OPTIMIZED_OUT
;
316 else if (p
->location
== DWARF_VALUE_MEMORY
)
318 p
->v
.mem
.addr
= dwarf_expr_fetch_address (ctx
, 0);
319 p
->v
.mem
.in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 0);
321 else if (p
->location
== DWARF_VALUE_IMPLICIT_POINTER
)
323 p
->v
.ptr
.die
= ctx
->len
;
324 p
->v
.ptr
.offset
= value_as_long (dwarf_expr_fetch (ctx
, 0));
326 else if (p
->location
== DWARF_VALUE_REGISTER
)
327 p
->v
.regno
= value_as_long (dwarf_expr_fetch (ctx
, 0));
330 p
->v
.value
= dwarf_expr_fetch (ctx
, 0);
334 /* Evaluate the expression at ADDR (LEN bytes long) using the context
338 dwarf_expr_eval (struct dwarf_expr_context
*ctx
, const gdb_byte
*addr
,
341 int old_recursion_depth
= ctx
->recursion_depth
;
343 execute_stack_op (ctx
, addr
, addr
+ len
);
345 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
347 gdb_assert (ctx
->recursion_depth
== old_recursion_depth
);
350 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
351 by R, and return the new value of BUF. Verify that it doesn't extend
355 read_uleb128 (const gdb_byte
*buf
, const gdb_byte
*buf_end
, ULONGEST
* r
)
364 error (_("read_uleb128: Corrupted DWARF expression."));
367 result
|= (byte
& 0x7f) << shift
;
368 if ((byte
& 0x80) == 0)
376 /* Decode the signed LEB128 constant at BUF into the variable pointed to
377 by R, and return the new value of BUF. Verify that it doesn't extend
381 read_sleb128 (const gdb_byte
*buf
, const gdb_byte
*buf_end
, LONGEST
* r
)
390 error (_("read_sleb128: Corrupted DWARF expression."));
393 result
|= (byte
& 0x7f) << shift
;
395 if ((byte
& 0x80) == 0)
398 if (shift
< (sizeof (*r
) * 8) && (byte
& 0x40) != 0)
399 result
|= -(1 << shift
);
406 /* Check that the current operator is either at the end of an
407 expression, or that it is followed by a composition operator. */
410 dwarf_expr_require_composition (const gdb_byte
*op_ptr
, const gdb_byte
*op_end
,
413 /* It seems like DW_OP_GNU_uninit should be handled here. However,
414 it doesn't seem to make sense for DW_OP_*_value, and it was not
415 checked at the other place that this function is called. */
416 if (op_ptr
!= op_end
&& *op_ptr
!= DW_OP_piece
&& *op_ptr
!= DW_OP_bit_piece
)
417 error (_("DWARF-2 expression error: `%s' operations must be "
418 "used either alone or in conjuction with DW_OP_piece "
419 "or DW_OP_bit_piece."),
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}
437 /* A convenience function to call get_base_type on CTX and return the
438 result. DIE is the DIE whose type we need. SIZE is non-zero if
439 this function should verify that the resulting type has the correct
443 dwarf_get_base_type (struct dwarf_expr_context
*ctx
, ULONGEST die
, int size
)
447 if (ctx
->get_base_type
)
449 result
= ctx
->get_base_type (ctx
, die
);
450 if (size
!= 0 && TYPE_LENGTH (result
) != size
)
451 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
454 /* Anything will do. */
455 result
= builtin_type (ctx
->gdbarch
)->builtin_int
;
460 /* The engine for the expression evaluator. Using the context in CTX,
461 evaluate the expression between OP_PTR and OP_END. */
464 execute_stack_op (struct dwarf_expr_context
*ctx
,
465 const gdb_byte
*op_ptr
, const gdb_byte
*op_end
)
467 enum bfd_endian byte_order
= gdbarch_byte_order (ctx
->gdbarch
);
468 /* Old-style "untyped" DWARF values need special treatment in a
469 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
470 a special type for these values so we can distinguish them from
471 values that have an explicit type, because explicitly-typed
472 values do not need special treatment. This special type must be
473 different (in the `==' sense) from any base type coming from the
475 struct type
*address_type
= dwarf_expr_address_type (ctx
);
477 ctx
->location
= DWARF_VALUE_MEMORY
;
478 ctx
->initialized
= 1; /* Default is initialized. */
480 if (ctx
->recursion_depth
> ctx
->max_recursion_depth
)
481 error (_("DWARF-2 expression error: Loop detected (%d)."),
482 ctx
->recursion_depth
);
483 ctx
->recursion_depth
++;
485 while (op_ptr
< op_end
)
487 enum dwarf_location_atom op
= *op_ptr
++;
489 /* Assume the value is not in stack memory.
490 Code that knows otherwise sets this to 1.
491 Some arithmetic on stack addresses can probably be assumed to still
492 be a stack address, but we skip this complication for now.
493 This is just an optimization, so it's always ok to punt
494 and leave this as 0. */
495 int in_stack_memory
= 0;
496 ULONGEST uoffset
, reg
;
498 struct value
*result_val
= NULL
;
534 result
= op
- DW_OP_lit0
;
535 result_val
= value_from_ulongest (address_type
, result
);
539 result
= extract_unsigned_integer (op_ptr
,
540 ctx
->addr_size
, byte_order
);
541 op_ptr
+= ctx
->addr_size
;
542 /* Some versions of GCC emit DW_OP_addr before
543 DW_OP_GNU_push_tls_address. In this case the value is an
544 index, not an address. We don't support things like
545 branching between the address and the TLS op. */
546 if (op_ptr
>= op_end
|| *op_ptr
!= DW_OP_GNU_push_tls_address
)
547 result
+= ctx
->offset
;
548 result_val
= value_from_ulongest (address_type
, result
);
552 result
= extract_unsigned_integer (op_ptr
, 1, byte_order
);
553 result_val
= value_from_ulongest (address_type
, result
);
557 result
= extract_signed_integer (op_ptr
, 1, byte_order
);
558 result_val
= value_from_ulongest (address_type
, result
);
562 result
= extract_unsigned_integer (op_ptr
, 2, byte_order
);
563 result_val
= value_from_ulongest (address_type
, result
);
567 result
= extract_signed_integer (op_ptr
, 2, byte_order
);
568 result_val
= value_from_ulongest (address_type
, result
);
572 result
= extract_unsigned_integer (op_ptr
, 4, byte_order
);
573 result_val
= value_from_ulongest (address_type
, result
);
577 result
= extract_signed_integer (op_ptr
, 4, byte_order
);
578 result_val
= value_from_ulongest (address_type
, result
);
582 result
= extract_unsigned_integer (op_ptr
, 8, byte_order
);
583 result_val
= value_from_ulongest (address_type
, result
);
587 result
= extract_signed_integer (op_ptr
, 8, byte_order
);
588 result_val
= value_from_ulongest (address_type
, result
);
592 op_ptr
= read_uleb128 (op_ptr
, op_end
, &uoffset
);
594 result_val
= value_from_ulongest (address_type
, result
);
597 op_ptr
= read_sleb128 (op_ptr
, op_end
, &offset
);
599 result_val
= value_from_ulongest (address_type
, result
);
602 /* The DW_OP_reg operations are required to occur alone in
603 location expressions. */
637 && *op_ptr
!= DW_OP_piece
638 && *op_ptr
!= DW_OP_bit_piece
639 && *op_ptr
!= DW_OP_GNU_uninit
)
640 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
641 "used either alone or in conjuction with DW_OP_piece "
642 "or DW_OP_bit_piece."));
644 result
= op
- DW_OP_reg0
;
645 result_val
= value_from_ulongest (address_type
, result
);
646 ctx
->location
= DWARF_VALUE_REGISTER
;
650 op_ptr
= read_uleb128 (op_ptr
, op_end
, ®
);
651 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_regx");
654 result_val
= value_from_ulongest (address_type
, result
);
655 ctx
->location
= DWARF_VALUE_REGISTER
;
658 case DW_OP_implicit_value
:
662 op_ptr
= read_uleb128 (op_ptr
, op_end
, &len
);
663 if (op_ptr
+ len
> op_end
)
664 error (_("DW_OP_implicit_value: too few bytes available."));
667 ctx
->location
= DWARF_VALUE_LITERAL
;
669 dwarf_expr_require_composition (op_ptr
, op_end
,
670 "DW_OP_implicit_value");
674 case DW_OP_stack_value
:
675 ctx
->location
= DWARF_VALUE_STACK
;
676 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_stack_value");
679 case DW_OP_GNU_implicit_pointer
:
684 /* The referred-to DIE. */
685 ctx
->len
= extract_unsigned_integer (op_ptr
, ctx
->addr_size
,
687 op_ptr
+= ctx
->addr_size
;
689 /* The byte offset into the data. */
690 op_ptr
= read_sleb128 (op_ptr
, op_end
, &len
);
691 result
= (ULONGEST
) len
;
692 result_val
= value_from_ulongest (address_type
, result
);
694 ctx
->location
= DWARF_VALUE_IMPLICIT_POINTER
;
695 dwarf_expr_require_composition (op_ptr
, op_end
,
696 "DW_OP_GNU_implicit_pointer");
733 op_ptr
= read_sleb128 (op_ptr
, op_end
, &offset
);
734 result
= (ctx
->read_reg
) (ctx
->baton
, op
- DW_OP_breg0
);
736 result_val
= value_from_ulongest (address_type
, result
);
741 op_ptr
= read_uleb128 (op_ptr
, op_end
, ®
);
742 op_ptr
= read_sleb128 (op_ptr
, op_end
, &offset
);
743 result
= (ctx
->read_reg
) (ctx
->baton
, reg
);
745 result_val
= value_from_ulongest (address_type
, result
);
750 const gdb_byte
*datastart
;
752 unsigned int before_stack_len
;
754 op_ptr
= read_sleb128 (op_ptr
, op_end
, &offset
);
755 /* Rather than create a whole new context, we simply
756 record the stack length before execution, then reset it
757 afterwards, effectively erasing whatever the recursive
759 before_stack_len
= ctx
->stack_len
;
760 /* FIXME: cagney/2003-03-26: This code should be using
761 get_frame_base_address(), and then implement a dwarf2
762 specific this_base method. */
763 (ctx
->get_frame_base
) (ctx
->baton
, &datastart
, &datalen
);
764 dwarf_expr_eval (ctx
, datastart
, datalen
);
765 if (ctx
->location
== DWARF_VALUE_MEMORY
)
766 result
= dwarf_expr_fetch_address (ctx
, 0);
767 else if (ctx
->location
== DWARF_VALUE_REGISTER
)
769 = (ctx
->read_reg
) (ctx
->baton
,
770 value_as_long (dwarf_expr_fetch (ctx
, 0)));
772 error (_("Not implemented: computing frame "
773 "base using explicit value operator"));
774 result
= result
+ offset
;
775 result_val
= value_from_ulongest (address_type
, result
);
777 ctx
->stack_len
= before_stack_len
;
778 ctx
->location
= DWARF_VALUE_MEMORY
;
783 result_val
= dwarf_expr_fetch (ctx
, 0);
784 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 0);
788 dwarf_expr_pop (ctx
);
793 result_val
= dwarf_expr_fetch (ctx
, offset
);
794 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, offset
);
799 struct dwarf_stack_value t1
, t2
;
801 if (ctx
->stack_len
< 2)
802 error (_("Not enough elements for "
803 "DW_OP_swap. Need 2, have %d."),
805 t1
= ctx
->stack
[ctx
->stack_len
- 1];
806 t2
= ctx
->stack
[ctx
->stack_len
- 2];
807 ctx
->stack
[ctx
->stack_len
- 1] = t2
;
808 ctx
->stack
[ctx
->stack_len
- 2] = t1
;
813 result_val
= dwarf_expr_fetch (ctx
, 1);
814 in_stack_memory
= dwarf_expr_fetch_in_stack_memory (ctx
, 1);
819 struct dwarf_stack_value t1
, t2
, t3
;
821 if (ctx
->stack_len
< 3)
822 error (_("Not enough elements for "
823 "DW_OP_rot. Need 3, have %d."),
825 t1
= ctx
->stack
[ctx
->stack_len
- 1];
826 t2
= ctx
->stack
[ctx
->stack_len
- 2];
827 t3
= ctx
->stack
[ctx
->stack_len
- 3];
828 ctx
->stack
[ctx
->stack_len
- 1] = t2
;
829 ctx
->stack
[ctx
->stack_len
- 2] = t3
;
830 ctx
->stack
[ctx
->stack_len
- 3] = t1
;
835 case DW_OP_deref_size
:
836 case DW_OP_GNU_deref_type
:
838 int addr_size
= (op
== DW_OP_deref
? ctx
->addr_size
: *op_ptr
++);
839 gdb_byte
*buf
= alloca (addr_size
);
840 CORE_ADDR addr
= dwarf_expr_fetch_address (ctx
, 0);
843 dwarf_expr_pop (ctx
);
845 if (op
== DW_OP_GNU_deref_type
)
849 op_ptr
= read_uleb128 (op_ptr
, op_end
, &type_die
);
850 type
= dwarf_get_base_type (ctx
, type_die
, 0);
855 (ctx
->read_mem
) (ctx
->baton
, buf
, addr
, addr_size
);
856 result_val
= value_from_contents_and_address (type
, buf
, addr
);
863 case DW_OP_plus_uconst
:
865 /* Unary operations. */
866 result_val
= dwarf_expr_fetch (ctx
, 0);
867 dwarf_expr_pop (ctx
);
872 if (value_less (result_val
,
873 value_zero (value_type (result_val
), not_lval
)))
874 result_val
= value_neg (result_val
);
877 result_val
= value_neg (result_val
);
880 dwarf_require_integral (value_type (result_val
));
881 result_val
= value_complement (result_val
);
883 case DW_OP_plus_uconst
:
884 dwarf_require_integral (value_type (result_val
));
885 result
= value_as_long (result_val
);
886 op_ptr
= read_uleb128 (op_ptr
, op_end
, ®
);
888 result_val
= value_from_ulongest (address_type
, result
);
912 /* Binary operations. */
913 struct value
*first
, *second
;
915 second
= dwarf_expr_fetch (ctx
, 0);
916 dwarf_expr_pop (ctx
);
918 first
= dwarf_expr_fetch (ctx
, 0);
919 dwarf_expr_pop (ctx
);
921 if (! base_types_equal_p (value_type (first
), value_type (second
)))
922 error (_("Incompatible types on DWARF stack"));
927 dwarf_require_integral (value_type (first
));
928 dwarf_require_integral (value_type (second
));
929 result_val
= value_binop (first
, second
, BINOP_BITWISE_AND
);
932 result_val
= value_binop (first
, second
, BINOP_DIV
);
935 result_val
= value_binop (first
, second
, BINOP_SUB
);
940 struct type
*orig_type
= value_type (first
);
942 /* We have to special-case "old-style" untyped values
943 -- these must have mod computed using unsigned
945 if (orig_type
== address_type
)
948 = get_unsigned_type (ctx
->gdbarch
, orig_type
);
951 first
= value_cast (utype
, first
);
952 second
= value_cast (utype
, second
);
954 /* Note that value_binop doesn't handle float or
955 decimal float here. This seems unimportant. */
956 result_val
= value_binop (first
, second
, BINOP_MOD
);
958 result_val
= value_cast (orig_type
, result_val
);
962 result_val
= value_binop (first
, second
, BINOP_MUL
);
965 dwarf_require_integral (value_type (first
));
966 dwarf_require_integral (value_type (second
));
967 result_val
= value_binop (first
, second
, BINOP_BITWISE_IOR
);
970 result_val
= value_binop (first
, second
, BINOP_ADD
);
973 dwarf_require_integral (value_type (first
));
974 dwarf_require_integral (value_type (second
));
975 result_val
= value_binop (first
, second
, BINOP_LSH
);
978 dwarf_require_integral (value_type (first
));
979 dwarf_require_integral (value_type (second
));
980 if (!TYPE_UNSIGNED (value_type (first
)))
983 = get_unsigned_type (ctx
->gdbarch
, value_type (first
));
985 first
= value_cast (utype
, first
);
988 result_val
= value_binop (first
, second
, BINOP_RSH
);
989 /* Make sure we wind up with the same type we started
991 if (value_type (result_val
) != value_type (second
))
992 result_val
= value_cast (value_type (second
), result_val
);
995 dwarf_require_integral (value_type (first
));
996 dwarf_require_integral (value_type (second
));
997 result_val
= value_binop (first
, second
, BINOP_RSH
);
1000 dwarf_require_integral (value_type (first
));
1001 dwarf_require_integral (value_type (second
));
1002 result_val
= value_binop (first
, second
, BINOP_BITWISE_XOR
);
1005 /* A <= B is !(B < A). */
1006 result
= ! value_less (second
, first
);
1007 result_val
= value_from_ulongest (address_type
, result
);
1010 /* A >= B is !(A < B). */
1011 result
= ! value_less (first
, second
);
1012 result_val
= value_from_ulongest (address_type
, result
);
1015 result
= value_equal (first
, second
);
1016 result_val
= value_from_ulongest (address_type
, result
);
1019 result
= value_less (first
, second
);
1020 result_val
= value_from_ulongest (address_type
, result
);
1023 /* A > B is B < A. */
1024 result
= value_less (second
, first
);
1025 result_val
= value_from_ulongest (address_type
, result
);
1028 result
= ! value_equal (first
, second
);
1029 result_val
= value_from_ulongest (address_type
, result
);
1032 internal_error (__FILE__
, __LINE__
,
1033 _("Can't be reached."));
1038 case DW_OP_call_frame_cfa
:
1039 result
= (ctx
->get_frame_cfa
) (ctx
->baton
);
1040 result_val
= value_from_ulongest (address_type
, result
);
1041 in_stack_memory
= 1;
1044 case DW_OP_GNU_push_tls_address
:
1045 /* Variable is at a constant offset in the thread-local
1046 storage block into the objfile for the current thread and
1047 the dynamic linker module containing this expression. Here
1048 we return returns the offset from that base. The top of the
1049 stack has the offset from the beginning of the thread
1050 control block at which the variable is located. Nothing
1051 should follow this operator, so the top of stack would be
1053 result
= value_as_long (dwarf_expr_fetch (ctx
, 0));
1054 dwarf_expr_pop (ctx
);
1055 result
= (ctx
->get_tls_address
) (ctx
->baton
, result
);
1056 result_val
= value_from_ulongest (address_type
, result
);
1060 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
1069 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
1071 val
= dwarf_expr_fetch (ctx
, 0);
1072 dwarf_require_integral (value_type (val
));
1073 if (value_as_long (val
) != 0)
1075 dwarf_expr_pop (ctx
);
1086 /* Record the piece. */
1087 op_ptr
= read_uleb128 (op_ptr
, op_end
, &size
);
1088 add_piece (ctx
, 8 * size
, 0);
1090 /* Pop off the address/regnum, and reset the location
1092 if (ctx
->location
!= DWARF_VALUE_LITERAL
1093 && ctx
->location
!= DWARF_VALUE_OPTIMIZED_OUT
)
1094 dwarf_expr_pop (ctx
);
1095 ctx
->location
= DWARF_VALUE_MEMORY
;
1099 case DW_OP_bit_piece
:
1101 ULONGEST size
, offset
;
1103 /* Record the piece. */
1104 op_ptr
= read_uleb128 (op_ptr
, op_end
, &size
);
1105 op_ptr
= read_uleb128 (op_ptr
, op_end
, &offset
);
1106 add_piece (ctx
, size
, offset
);
1108 /* Pop off the address/regnum, and reset the location
1110 if (ctx
->location
!= DWARF_VALUE_LITERAL
1111 && ctx
->location
!= DWARF_VALUE_OPTIMIZED_OUT
)
1112 dwarf_expr_pop (ctx
);
1113 ctx
->location
= DWARF_VALUE_MEMORY
;
1117 case DW_OP_GNU_uninit
:
1118 if (op_ptr
!= op_end
)
1119 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1120 "be the very last op."));
1122 ctx
->initialized
= 0;
1126 result
= extract_unsigned_integer (op_ptr
, 2, byte_order
);
1128 ctx
->dwarf_call (ctx
, result
);
1132 result
= extract_unsigned_integer (op_ptr
, 4, byte_order
);
1134 ctx
->dwarf_call (ctx
, result
);
1137 case DW_OP_GNU_entry_value
:
1138 /* This operation is not yet supported by GDB. */
1139 ctx
->location
= DWARF_VALUE_OPTIMIZED_OUT
;
1141 ctx
->num_pieces
= 0;
1142 goto abort_expression
;
1144 case DW_OP_GNU_const_type
:
1148 const gdb_byte
*data
;
1151 op_ptr
= read_uleb128 (op_ptr
, op_end
, &type_die
);
1156 type
= dwarf_get_base_type (ctx
, type_die
, n
);
1157 result_val
= value_from_contents (type
, data
);
1161 case DW_OP_GNU_regval_type
:
1166 op_ptr
= read_uleb128 (op_ptr
, op_end
, ®
);
1167 op_ptr
= read_uleb128 (op_ptr
, op_end
, &type_die
);
1169 type
= dwarf_get_base_type (ctx
, type_die
, 0);
1170 result
= (ctx
->read_reg
) (ctx
->baton
, reg
);
1171 result_val
= value_from_ulongest (type
, result
);
1175 case DW_OP_GNU_convert
:
1176 case DW_OP_GNU_reinterpret
:
1181 op_ptr
= read_uleb128 (op_ptr
, op_end
, &type_die
);
1183 type
= dwarf_get_base_type (ctx
, type_die
, 0);
1185 result_val
= dwarf_expr_fetch (ctx
, 0);
1186 dwarf_expr_pop (ctx
);
1188 if (op
== DW_OP_GNU_convert
)
1189 result_val
= value_cast (type
, result_val
);
1190 else if (type
== value_type (result_val
))
1194 else if (TYPE_LENGTH (type
)
1195 != TYPE_LENGTH (value_type (result_val
)))
1196 error (_("DW_OP_GNU_reinterpret has wrong size"));
1199 = value_from_contents (type
,
1200 value_contents_all (result_val
));
1205 error (_("Unhandled dwarf expression opcode 0x%x"), op
);
1208 /* Most things push a result value. */
1209 gdb_assert (result_val
!= NULL
);
1210 dwarf_expr_push (ctx
, result_val
, in_stack_memory
);
1215 /* To simplify our main caller, if the result is an implicit
1216 pointer, then make a pieced value. This is ok because we can't
1217 have implicit pointers in contexts where pieces are invalid. */
1218 if (ctx
->location
== DWARF_VALUE_IMPLICIT_POINTER
)
1219 add_piece (ctx
, 8 * ctx
->addr_size
, 0);
1222 ctx
->recursion_depth
--;
1223 gdb_assert (ctx
->recursion_depth
>= 0);
1227 _initialize_dwarf2expr (void)
1230 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init
);