Commit | Line | Data |
---|---|---|
852483bc MK |
1 | /* DWARF 2 Expression Evaluator. |
2 | ||
7b6bb8da | 3 | Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011 |
9b254dd1 | 4 | Free Software Foundation, Inc. |
852483bc | 5 | |
4c2df51b DJ |
6 | Contributed by Daniel Berlin (dan@dberlin.org) |
7 | ||
8 | This file is part of GDB. | |
9 | ||
10 | This program is free software; you can redistribute it and/or modify | |
11 | it under the terms of the GNU General Public License as published by | |
a9762ec7 | 12 | the Free Software Foundation; either version 3 of the License, or |
4c2df51b DJ |
13 | (at your option) any later version. |
14 | ||
15 | This program is distributed in the hope that it will be useful, | |
16 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
18 | GNU General Public License for more details. | |
19 | ||
20 | You should have received a copy of the GNU General Public License | |
a9762ec7 | 21 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
4c2df51b DJ |
22 | |
23 | #include "defs.h" | |
24 | #include "symtab.h" | |
25 | #include "gdbtypes.h" | |
26 | #include "value.h" | |
27 | #include "gdbcore.h" | |
fa8f86ff | 28 | #include "dwarf2.h" |
4c2df51b | 29 | #include "dwarf2expr.h" |
1e3a102a | 30 | #include "gdb_assert.h" |
4c2df51b DJ |
31 | |
32 | /* Local prototypes. */ | |
33 | ||
34 | static void execute_stack_op (struct dwarf_expr_context *, | |
0d45f56e | 35 | const gdb_byte *, const gdb_byte *); |
4c2df51b | 36 | |
/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Signed integer types for 2-, 4-, and 8-byte addresses, created
     lazily by dwarf_expr_address_type; slots start out NULL because
     the struct is zero-allocated.  */
  struct type *dw_types[3];
};
48 | ||
/* Allocate and fill in dwarf_gdbarch_types for an arch.  Registered
   as the gdbarch-data init function for DWARF_ARCH_COOKIE; the
   zero-allocation leaves every type slot NULL.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}
61 | ||
62 | /* Return the type used for DWARF operations where the type is | |
63 | unspecified in the DWARF spec. Only certain sizes are | |
64 | supported. */ | |
65 | ||
66 | static struct type * | |
67 | dwarf_expr_address_type (struct dwarf_expr_context *ctx) | |
68 | { | |
69 | struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch, | |
70 | dwarf_arch_cookie); | |
71 | int ndx; | |
72 | ||
73 | if (ctx->addr_size == 2) | |
74 | ndx = 0; | |
75 | else if (ctx->addr_size == 4) | |
76 | ndx = 1; | |
77 | else if (ctx->addr_size == 8) | |
78 | ndx = 2; | |
79 | else | |
80 | error (_("Unsupported address size in DWARF expressions: %d bits"), | |
81 | 8 * ctx->addr_size); | |
82 | ||
83 | if (types->dw_types[ndx] == NULL) | |
84 | types->dw_types[ndx] | |
85 | = arch_integer_type (ctx->gdbarch, | |
86 | 8 * ctx->addr_size, | |
87 | 0, "<signed DWARF address type>"); | |
88 | ||
89 | return types->dw_types[ndx]; | |
90 | } | |
91 | ||
4c2df51b DJ |
92 | /* Create a new context for the expression evaluator. */ |
93 | ||
94 | struct dwarf_expr_context * | |
e4adbba9 | 95 | new_dwarf_expr_context (void) |
4c2df51b DJ |
96 | { |
97 | struct dwarf_expr_context *retval; | |
9a619af0 | 98 | |
4c2df51b | 99 | retval = xcalloc (1, sizeof (struct dwarf_expr_context)); |
18ec9831 KB |
100 | retval->stack_len = 0; |
101 | retval->stack_allocated = 10; | |
b966cb8a TT |
102 | retval->stack = xmalloc (retval->stack_allocated |
103 | * sizeof (struct dwarf_stack_value)); | |
87808bd6 JB |
104 | retval->num_pieces = 0; |
105 | retval->pieces = 0; | |
1e3a102a | 106 | retval->max_recursion_depth = 0x100; |
4c2df51b DJ |
107 | return retval; |
108 | } | |
109 | ||
/* Release the memory allocated to CTX: the value stack, the piece
   list, and the context structure itself.  CTX must not be used
   afterwards.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
119 | ||
4a227398 TT |
/* Helper for make_cleanup_free_dwarf_expr_context.  ARG is the
   dwarf_expr_context to free, passed as void * to match the cleanup
   callback signature.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
127 | ||
/* Return a cleanup that calls free_dwarf_expr_context on CTX when the
   cleanup is run.  Ownership of CTX passes to the cleanup chain.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
135 | ||
4c2df51b DJ |
136 | /* Expand the memory allocated to CTX's stack to contain at least |
137 | NEED more elements than are currently used. */ | |
138 | ||
139 | static void | |
140 | dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need) | |
141 | { | |
142 | if (ctx->stack_len + need > ctx->stack_allocated) | |
143 | { | |
18ec9831 | 144 | size_t newlen = ctx->stack_len + need + 10; |
9a619af0 | 145 | |
4c2df51b | 146 | ctx->stack = xrealloc (ctx->stack, |
44353522 | 147 | newlen * sizeof (struct dwarf_stack_value)); |
18ec9831 | 148 | ctx->stack_allocated = newlen; |
4c2df51b DJ |
149 | } |
150 | } | |
151 | ||
152 | /* Push VALUE onto CTX's stack. */ | |
153 | ||
8a9b8146 TT |
154 | static void |
155 | dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value, | |
44353522 | 156 | int in_stack_memory) |
4c2df51b | 157 | { |
44353522 DE |
158 | struct dwarf_stack_value *v; |
159 | ||
4c2df51b | 160 | dwarf_expr_grow_stack (ctx, 1); |
44353522 DE |
161 | v = &ctx->stack[ctx->stack_len++]; |
162 | v->value = value; | |
163 | v->in_stack_memory = in_stack_memory; | |
4c2df51b DJ |
164 | } |
165 | ||
/* Push the address VALUE onto CTX's stack, wrapped as a value of the
   evaluator's default (untyped) address type.  IN_STACK_MEMORY is the
   flag stored with the new stack entry.  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
			 int in_stack_memory)
{
  dwarf_expr_push (ctx,
		   value_from_ulongest (dwarf_expr_address_type (ctx), value),
		   in_stack_memory);
}
176 | ||
/* Pop the top item off of CTX's stack.  Underflow indicates a
   malformed DWARF expression, so it is reported with error rather
   than an assertion.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
186 | ||
/* Retrieve the N'th item on CTX's stack.  N counts from the top of
   the stack: N == 0 is the most recently pushed element.  The element
   is not popped.  Throws an error if N is out of range.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}
198 | ||
199 | /* Require that TYPE be an integral type; throw an exception if not. */ | |
44353522 | 200 | |
8a9b8146 TT |
201 | static void |
202 | dwarf_require_integral (struct type *type) | |
203 | { | |
204 | if (TYPE_CODE (type) != TYPE_CODE_INT | |
205 | && TYPE_CODE (type) != TYPE_CODE_CHAR | |
206 | && TYPE_CODE (type) != TYPE_CODE_BOOL) | |
207 | error (_("integral type expected in DWARF expression")); | |
208 | } | |
209 | ||
/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  Only 1-, 2-, 4- and 8-byte types are supported; any other
   length raises an error.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
231 | ||
f2c7657e UW |
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The element must have an integral type; it is not popped.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      /* Re-serialize the value at the target's address size so the
	 gdbarch hook sees the representation it expects.  */
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
264 | ||
44353522 DE |
/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.
   N counts from the top (N == 0 is the top element); the element is
   not popped.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}
276 | ||
cb826367 TT |
/* Return true if the expression stack is empty.  Used by add_piece to
   decide whether a piece has any location at all.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
284 | ||
87808bd6 JB |
/* Add a new piece to CTX's piece list.  SIZE is the piece's size;
   OFFSET is its offset within the entity the piece's location refers
   to.  The piece's location kind is taken from CTX->location, and the
   location-specific payload is read from the context or from the top
   of the value stack, which is left unpopped.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  /* Grow the piece array by one and fill in the new last slot.  */
  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry their bytes in the context, not on the
	 value stack.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means this piece has no location at all.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* For implicit pointers, CTX->len holds the referenced DIE and
	 the stack top holds the offset into its value.  */
      p->v.ptr.die = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK: the piece's value is the stack top itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
333 | ||
4c2df51b DJ |
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  On normal return the recursion depth is unchanged; if
   execute_stack_op throws, the depth counter in CTX is left
   inconsistent (see comment below).  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
349 | ||
350 | /* Decode the unsigned LEB128 constant at BUF into the variable pointed to | |
351 | by R, and return the new value of BUF. Verify that it doesn't extend | |
352 | past BUF_END. */ | |
353 | ||
0d45f56e TT |
354 | const gdb_byte * |
355 | read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r) | |
4c2df51b DJ |
356 | { |
357 | unsigned shift = 0; | |
358 | ULONGEST result = 0; | |
852483bc | 359 | gdb_byte byte; |
4c2df51b DJ |
360 | |
361 | while (1) | |
362 | { | |
363 | if (buf >= buf_end) | |
8a3fe4f8 | 364 | error (_("read_uleb128: Corrupted DWARF expression.")); |
4c2df51b DJ |
365 | |
366 | byte = *buf++; | |
367 | result |= (byte & 0x7f) << shift; | |
368 | if ((byte & 0x80) == 0) | |
369 | break; | |
370 | shift += 7; | |
371 | } | |
372 | *r = result; | |
373 | return buf; | |
374 | } | |
375 | ||
376 | /* Decode the signed LEB128 constant at BUF into the variable pointed to | |
377 | by R, and return the new value of BUF. Verify that it doesn't extend | |
378 | past BUF_END. */ | |
379 | ||
0d45f56e TT |
380 | const gdb_byte * |
381 | read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r) | |
4c2df51b DJ |
382 | { |
383 | unsigned shift = 0; | |
384 | LONGEST result = 0; | |
852483bc | 385 | gdb_byte byte; |
4c2df51b DJ |
386 | |
387 | while (1) | |
388 | { | |
389 | if (buf >= buf_end) | |
8a3fe4f8 | 390 | error (_("read_sleb128: Corrupted DWARF expression.")); |
4c2df51b DJ |
391 | |
392 | byte = *buf++; | |
393 | result |= (byte & 0x7f) << shift; | |
394 | shift += 7; | |
395 | if ((byte & 0x80) == 0) | |
396 | break; | |
397 | } | |
398 | if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0) | |
399 | result |= -(1 << shift); | |
400 | ||
401 | *r = result; | |
402 | return buf; | |
403 | } | |
4c2df51b | 404 | \f |
cec03d70 TT |
405 | |
406 | /* Check that the current operator is either at the end of an | |
407 | expression, or that it is followed by a composition operator. */ | |
408 | ||
3cf03773 TT |
409 | void |
410 | dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end, | |
411 | const char *op_name) | |
cec03d70 TT |
412 | { |
413 | /* It seems like DW_OP_GNU_uninit should be handled here. However, | |
414 | it doesn't seem to make sense for DW_OP_*_value, and it was not | |
415 | checked at the other place that this function is called. */ | |
416 | if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece) | |
417 | error (_("DWARF-2 expression error: `%s' operations must be " | |
418 | "used either alone or in conjuction with DW_OP_piece " | |
419 | "or DW_OP_bit_piece."), | |
420 | op_name); | |
421 | } | |
422 | ||
8a9b8146 TT |
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  /* Two base types match when they agree on type code, signedness,
     and length.  */
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
436 | ||
/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  When the context supplies no get_base_type callback, the
   architecture's plain int is returned as a stand-in.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
{
  struct type *result;

  if (ctx->get_base_type)
    {
      result = ctx->get_base_type (ctx, die);
      if (size != 0 && TYPE_LENGTH (result) != size)
	error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}
459 | ||
4c2df51b DJ |
460 | /* The engine for the expression evaluator. Using the context in CTX, |
461 | evaluate the expression between OP_PTR and OP_END. */ | |
462 | ||
463 | static void | |
852483bc | 464 | execute_stack_op (struct dwarf_expr_context *ctx, |
0d45f56e | 465 | const gdb_byte *op_ptr, const gdb_byte *op_end) |
4c2df51b | 466 | { |
e17a4113 | 467 | enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch); |
8a9b8146 TT |
468 | /* Old-style "untyped" DWARF values need special treatment in a |
469 | couple of places, specifically DW_OP_mod and DW_OP_shr. We need | |
470 | a special type for these values so we can distinguish them from | |
471 | values that have an explicit type, because explicitly-typed | |
472 | values do not need special treatment. This special type must be | |
473 | different (in the `==' sense) from any base type coming from the | |
474 | CU. */ | |
475 | struct type *address_type = dwarf_expr_address_type (ctx); | |
9a619af0 | 476 | |
cec03d70 | 477 | ctx->location = DWARF_VALUE_MEMORY; |
42be36b3 | 478 | ctx->initialized = 1; /* Default is initialized. */ |
18ec9831 | 479 | |
1e3a102a JK |
480 | if (ctx->recursion_depth > ctx->max_recursion_depth) |
481 | error (_("DWARF-2 expression error: Loop detected (%d)."), | |
482 | ctx->recursion_depth); | |
483 | ctx->recursion_depth++; | |
484 | ||
4c2df51b DJ |
485 | while (op_ptr < op_end) |
486 | { | |
487 | enum dwarf_location_atom op = *op_ptr++; | |
f2c7657e | 488 | ULONGEST result; |
44353522 DE |
489 | /* Assume the value is not in stack memory. |
490 | Code that knows otherwise sets this to 1. | |
491 | Some arithmetic on stack addresses can probably be assumed to still | |
492 | be a stack address, but we skip this complication for now. | |
493 | This is just an optimization, so it's always ok to punt | |
494 | and leave this as 0. */ | |
495 | int in_stack_memory = 0; | |
4c2df51b DJ |
496 | ULONGEST uoffset, reg; |
497 | LONGEST offset; | |
8a9b8146 | 498 | struct value *result_val = NULL; |
4c2df51b | 499 | |
4c2df51b DJ |
500 | switch (op) |
501 | { | |
502 | case DW_OP_lit0: | |
503 | case DW_OP_lit1: | |
504 | case DW_OP_lit2: | |
505 | case DW_OP_lit3: | |
506 | case DW_OP_lit4: | |
507 | case DW_OP_lit5: | |
508 | case DW_OP_lit6: | |
509 | case DW_OP_lit7: | |
510 | case DW_OP_lit8: | |
511 | case DW_OP_lit9: | |
512 | case DW_OP_lit10: | |
513 | case DW_OP_lit11: | |
514 | case DW_OP_lit12: | |
515 | case DW_OP_lit13: | |
516 | case DW_OP_lit14: | |
517 | case DW_OP_lit15: | |
518 | case DW_OP_lit16: | |
519 | case DW_OP_lit17: | |
520 | case DW_OP_lit18: | |
521 | case DW_OP_lit19: | |
522 | case DW_OP_lit20: | |
523 | case DW_OP_lit21: | |
524 | case DW_OP_lit22: | |
525 | case DW_OP_lit23: | |
526 | case DW_OP_lit24: | |
527 | case DW_OP_lit25: | |
528 | case DW_OP_lit26: | |
529 | case DW_OP_lit27: | |
530 | case DW_OP_lit28: | |
531 | case DW_OP_lit29: | |
532 | case DW_OP_lit30: | |
533 | case DW_OP_lit31: | |
534 | result = op - DW_OP_lit0; | |
8a9b8146 | 535 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
536 | break; |
537 | ||
538 | case DW_OP_addr: | |
f2c7657e UW |
539 | result = extract_unsigned_integer (op_ptr, |
540 | ctx->addr_size, byte_order); | |
ae0d2f24 | 541 | op_ptr += ctx->addr_size; |
ac56253d TT |
542 | /* Some versions of GCC emit DW_OP_addr before |
543 | DW_OP_GNU_push_tls_address. In this case the value is an | |
544 | index, not an address. We don't support things like | |
545 | branching between the address and the TLS op. */ | |
546 | if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address) | |
547 | result += ctx->offset; | |
8a9b8146 | 548 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
549 | break; |
550 | ||
551 | case DW_OP_const1u: | |
e17a4113 | 552 | result = extract_unsigned_integer (op_ptr, 1, byte_order); |
8a9b8146 | 553 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
554 | op_ptr += 1; |
555 | break; | |
556 | case DW_OP_const1s: | |
e17a4113 | 557 | result = extract_signed_integer (op_ptr, 1, byte_order); |
8a9b8146 | 558 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
559 | op_ptr += 1; |
560 | break; | |
561 | case DW_OP_const2u: | |
e17a4113 | 562 | result = extract_unsigned_integer (op_ptr, 2, byte_order); |
8a9b8146 | 563 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
564 | op_ptr += 2; |
565 | break; | |
566 | case DW_OP_const2s: | |
e17a4113 | 567 | result = extract_signed_integer (op_ptr, 2, byte_order); |
8a9b8146 | 568 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
569 | op_ptr += 2; |
570 | break; | |
571 | case DW_OP_const4u: | |
e17a4113 | 572 | result = extract_unsigned_integer (op_ptr, 4, byte_order); |
8a9b8146 | 573 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
574 | op_ptr += 4; |
575 | break; | |
576 | case DW_OP_const4s: | |
e17a4113 | 577 | result = extract_signed_integer (op_ptr, 4, byte_order); |
8a9b8146 | 578 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
579 | op_ptr += 4; |
580 | break; | |
581 | case DW_OP_const8u: | |
e17a4113 | 582 | result = extract_unsigned_integer (op_ptr, 8, byte_order); |
8a9b8146 | 583 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
584 | op_ptr += 8; |
585 | break; | |
586 | case DW_OP_const8s: | |
e17a4113 | 587 | result = extract_signed_integer (op_ptr, 8, byte_order); |
8a9b8146 | 588 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
589 | op_ptr += 8; |
590 | break; | |
591 | case DW_OP_constu: | |
592 | op_ptr = read_uleb128 (op_ptr, op_end, &uoffset); | |
593 | result = uoffset; | |
8a9b8146 | 594 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
595 | break; |
596 | case DW_OP_consts: | |
597 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
598 | result = offset; | |
8a9b8146 | 599 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
600 | break; |
601 | ||
602 | /* The DW_OP_reg operations are required to occur alone in | |
603 | location expressions. */ | |
604 | case DW_OP_reg0: | |
605 | case DW_OP_reg1: | |
606 | case DW_OP_reg2: | |
607 | case DW_OP_reg3: | |
608 | case DW_OP_reg4: | |
609 | case DW_OP_reg5: | |
610 | case DW_OP_reg6: | |
611 | case DW_OP_reg7: | |
612 | case DW_OP_reg8: | |
613 | case DW_OP_reg9: | |
614 | case DW_OP_reg10: | |
615 | case DW_OP_reg11: | |
616 | case DW_OP_reg12: | |
617 | case DW_OP_reg13: | |
618 | case DW_OP_reg14: | |
619 | case DW_OP_reg15: | |
620 | case DW_OP_reg16: | |
621 | case DW_OP_reg17: | |
622 | case DW_OP_reg18: | |
623 | case DW_OP_reg19: | |
624 | case DW_OP_reg20: | |
625 | case DW_OP_reg21: | |
626 | case DW_OP_reg22: | |
627 | case DW_OP_reg23: | |
628 | case DW_OP_reg24: | |
629 | case DW_OP_reg25: | |
630 | case DW_OP_reg26: | |
631 | case DW_OP_reg27: | |
632 | case DW_OP_reg28: | |
633 | case DW_OP_reg29: | |
634 | case DW_OP_reg30: | |
635 | case DW_OP_reg31: | |
42be36b3 CT |
636 | if (op_ptr != op_end |
637 | && *op_ptr != DW_OP_piece | |
d3b1e874 | 638 | && *op_ptr != DW_OP_bit_piece |
42be36b3 | 639 | && *op_ptr != DW_OP_GNU_uninit) |
8a3fe4f8 | 640 | error (_("DWARF-2 expression error: DW_OP_reg operations must be " |
d3b1e874 TT |
641 | "used either alone or in conjuction with DW_OP_piece " |
642 | "or DW_OP_bit_piece.")); | |
4c2df51b | 643 | |
61fbb938 | 644 | result = op - DW_OP_reg0; |
8a9b8146 | 645 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 646 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
647 | break; |
648 | ||
649 | case DW_OP_regx: | |
650 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
3cf03773 | 651 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx"); |
4c2df51b | 652 | |
61fbb938 | 653 | result = reg; |
8a9b8146 | 654 | result_val = value_from_ulongest (address_type, result); |
cec03d70 | 655 | ctx->location = DWARF_VALUE_REGISTER; |
4c2df51b DJ |
656 | break; |
657 | ||
cec03d70 TT |
658 | case DW_OP_implicit_value: |
659 | { | |
660 | ULONGEST len; | |
9a619af0 | 661 | |
cec03d70 TT |
662 | op_ptr = read_uleb128 (op_ptr, op_end, &len); |
663 | if (op_ptr + len > op_end) | |
664 | error (_("DW_OP_implicit_value: too few bytes available.")); | |
665 | ctx->len = len; | |
666 | ctx->data = op_ptr; | |
667 | ctx->location = DWARF_VALUE_LITERAL; | |
668 | op_ptr += len; | |
3cf03773 TT |
669 | dwarf_expr_require_composition (op_ptr, op_end, |
670 | "DW_OP_implicit_value"); | |
cec03d70 TT |
671 | } |
672 | goto no_push; | |
673 | ||
674 | case DW_OP_stack_value: | |
675 | ctx->location = DWARF_VALUE_STACK; | |
3cf03773 | 676 | dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value"); |
cec03d70 TT |
677 | goto no_push; |
678 | ||
8cf6f0b1 TT |
679 | case DW_OP_GNU_implicit_pointer: |
680 | { | |
681 | ULONGEST die; | |
682 | LONGEST len; | |
683 | ||
684 | /* The referred-to DIE. */ | |
685 | ctx->len = extract_unsigned_integer (op_ptr, ctx->addr_size, | |
686 | byte_order); | |
687 | op_ptr += ctx->addr_size; | |
688 | ||
689 | /* The byte offset into the data. */ | |
690 | op_ptr = read_sleb128 (op_ptr, op_end, &len); | |
691 | result = (ULONGEST) len; | |
8a9b8146 | 692 | result_val = value_from_ulongest (address_type, result); |
8cf6f0b1 TT |
693 | |
694 | ctx->location = DWARF_VALUE_IMPLICIT_POINTER; | |
695 | dwarf_expr_require_composition (op_ptr, op_end, | |
696 | "DW_OP_GNU_implicit_pointer"); | |
697 | } | |
698 | break; | |
699 | ||
4c2df51b DJ |
700 | case DW_OP_breg0: |
701 | case DW_OP_breg1: | |
702 | case DW_OP_breg2: | |
703 | case DW_OP_breg3: | |
704 | case DW_OP_breg4: | |
705 | case DW_OP_breg5: | |
706 | case DW_OP_breg6: | |
707 | case DW_OP_breg7: | |
708 | case DW_OP_breg8: | |
709 | case DW_OP_breg9: | |
710 | case DW_OP_breg10: | |
711 | case DW_OP_breg11: | |
712 | case DW_OP_breg12: | |
713 | case DW_OP_breg13: | |
714 | case DW_OP_breg14: | |
715 | case DW_OP_breg15: | |
716 | case DW_OP_breg16: | |
717 | case DW_OP_breg17: | |
718 | case DW_OP_breg18: | |
719 | case DW_OP_breg19: | |
720 | case DW_OP_breg20: | |
721 | case DW_OP_breg21: | |
722 | case DW_OP_breg22: | |
723 | case DW_OP_breg23: | |
724 | case DW_OP_breg24: | |
725 | case DW_OP_breg25: | |
726 | case DW_OP_breg26: | |
727 | case DW_OP_breg27: | |
728 | case DW_OP_breg28: | |
729 | case DW_OP_breg29: | |
730 | case DW_OP_breg30: | |
731 | case DW_OP_breg31: | |
732 | { | |
733 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
61fbb938 | 734 | result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0); |
4c2df51b | 735 | result += offset; |
8a9b8146 | 736 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
737 | } |
738 | break; | |
739 | case DW_OP_bregx: | |
740 | { | |
741 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
742 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
61fbb938 | 743 | result = (ctx->read_reg) (ctx->baton, reg); |
4c2df51b | 744 | result += offset; |
8a9b8146 | 745 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
746 | } |
747 | break; | |
748 | case DW_OP_fbreg: | |
749 | { | |
0d45f56e | 750 | const gdb_byte *datastart; |
4c2df51b DJ |
751 | size_t datalen; |
752 | unsigned int before_stack_len; | |
753 | ||
754 | op_ptr = read_sleb128 (op_ptr, op_end, &offset); | |
755 | /* Rather than create a whole new context, we simply | |
756 | record the stack length before execution, then reset it | |
757 | afterwards, effectively erasing whatever the recursive | |
758 | call put there. */ | |
759 | before_stack_len = ctx->stack_len; | |
da62e633 AC |
760 | /* FIXME: cagney/2003-03-26: This code should be using |
761 | get_frame_base_address(), and then implement a dwarf2 | |
762 | specific this_base method. */ | |
4c2df51b DJ |
763 | (ctx->get_frame_base) (ctx->baton, &datastart, &datalen); |
764 | dwarf_expr_eval (ctx, datastart, datalen); | |
f2c7657e UW |
765 | if (ctx->location == DWARF_VALUE_MEMORY) |
766 | result = dwarf_expr_fetch_address (ctx, 0); | |
767 | else if (ctx->location == DWARF_VALUE_REGISTER) | |
8a9b8146 TT |
768 | result |
769 | = (ctx->read_reg) (ctx->baton, | |
770 | value_as_long (dwarf_expr_fetch (ctx, 0))); | |
f2c7657e | 771 | else |
3e43a32a MS |
772 | error (_("Not implemented: computing frame " |
773 | "base using explicit value operator")); | |
4c2df51b | 774 | result = result + offset; |
8a9b8146 | 775 | result_val = value_from_ulongest (address_type, result); |
44353522 | 776 | in_stack_memory = 1; |
4c2df51b | 777 | ctx->stack_len = before_stack_len; |
cec03d70 | 778 | ctx->location = DWARF_VALUE_MEMORY; |
4c2df51b DJ |
779 | } |
780 | break; | |
44353522 | 781 | |
4c2df51b | 782 | case DW_OP_dup: |
8a9b8146 | 783 | result_val = dwarf_expr_fetch (ctx, 0); |
44353522 | 784 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0); |
4c2df51b DJ |
785 | break; |
786 | ||
787 | case DW_OP_drop: | |
788 | dwarf_expr_pop (ctx); | |
789 | goto no_push; | |
790 | ||
791 | case DW_OP_pick: | |
792 | offset = *op_ptr++; | |
8a9b8146 | 793 | result_val = dwarf_expr_fetch (ctx, offset); |
44353522 | 794 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset); |
4c2df51b | 795 | break; |
9f3fe11c TG |
796 | |
797 | case DW_OP_swap: | |
798 | { | |
44353522 | 799 | struct dwarf_stack_value t1, t2; |
9f3fe11c TG |
800 | |
801 | if (ctx->stack_len < 2) | |
3e43a32a | 802 | error (_("Not enough elements for " |
0963b4bd | 803 | "DW_OP_swap. Need 2, have %d."), |
9f3fe11c TG |
804 | ctx->stack_len); |
805 | t1 = ctx->stack[ctx->stack_len - 1]; | |
806 | t2 = ctx->stack[ctx->stack_len - 2]; | |
807 | ctx->stack[ctx->stack_len - 1] = t2; | |
808 | ctx->stack[ctx->stack_len - 2] = t1; | |
809 | goto no_push; | |
810 | } | |
4c2df51b DJ |
811 | |
812 | case DW_OP_over: | |
8a9b8146 | 813 | result_val = dwarf_expr_fetch (ctx, 1); |
44353522 | 814 | in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1); |
4c2df51b DJ |
815 | break; |
816 | ||
817 | case DW_OP_rot: | |
818 | { | |
44353522 | 819 | struct dwarf_stack_value t1, t2, t3; |
4c2df51b DJ |
820 | |
821 | if (ctx->stack_len < 3) | |
0963b4bd MS |
822 | error (_("Not enough elements for " |
823 | "DW_OP_rot. Need 3, have %d."), | |
4c2df51b DJ |
824 | ctx->stack_len); |
825 | t1 = ctx->stack[ctx->stack_len - 1]; | |
826 | t2 = ctx->stack[ctx->stack_len - 2]; | |
827 | t3 = ctx->stack[ctx->stack_len - 3]; | |
828 | ctx->stack[ctx->stack_len - 1] = t2; | |
829 | ctx->stack[ctx->stack_len - 2] = t3; | |
830 | ctx->stack[ctx->stack_len - 3] = t1; | |
831 | goto no_push; | |
832 | } | |
833 | ||
834 | case DW_OP_deref: | |
835 | case DW_OP_deref_size: | |
8a9b8146 | 836 | case DW_OP_GNU_deref_type: |
f2c7657e UW |
837 | { |
838 | int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++); | |
839 | gdb_byte *buf = alloca (addr_size); | |
840 | CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0); | |
8a9b8146 TT |
841 | struct type *type; |
842 | ||
f2c7657e UW |
843 | dwarf_expr_pop (ctx); |
844 | ||
8a9b8146 TT |
845 | if (op == DW_OP_GNU_deref_type) |
846 | { | |
847 | ULONGEST type_die; | |
848 | ||
849 | op_ptr = read_uleb128 (op_ptr, op_end, &type_die); | |
850 | type = dwarf_get_base_type (ctx, type_die, 0); | |
851 | } | |
852 | else | |
853 | type = address_type; | |
854 | ||
f2c7657e | 855 | (ctx->read_mem) (ctx->baton, buf, addr, addr_size); |
8a9b8146 | 856 | result_val = value_from_contents_and_address (type, buf, addr); |
f2c7657e UW |
857 | break; |
858 | } | |
859 | ||
4c2df51b DJ |
860 | case DW_OP_abs: |
861 | case DW_OP_neg: | |
862 | case DW_OP_not: | |
863 | case DW_OP_plus_uconst: | |
8a9b8146 TT |
864 | { |
865 | /* Unary operations. */ | |
866 | result_val = dwarf_expr_fetch (ctx, 0); | |
867 | dwarf_expr_pop (ctx); | |
4c2df51b | 868 | |
8a9b8146 TT |
869 | switch (op) |
870 | { | |
871 | case DW_OP_abs: | |
872 | if (value_less (result_val, | |
873 | value_zero (value_type (result_val), not_lval))) | |
874 | result_val = value_neg (result_val); | |
875 | break; | |
876 | case DW_OP_neg: | |
877 | result_val = value_neg (result_val); | |
878 | break; | |
879 | case DW_OP_not: | |
880 | dwarf_require_integral (value_type (result_val)); | |
881 | result_val = value_complement (result_val); | |
882 | break; | |
883 | case DW_OP_plus_uconst: | |
884 | dwarf_require_integral (value_type (result_val)); | |
885 | result = value_as_long (result_val); | |
886 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
887 | result += reg; | |
888 | result_val = value_from_ulongest (address_type, result); | |
889 | break; | |
890 | } | |
891 | } | |
4c2df51b DJ |
892 | break; |
893 | ||
894 | case DW_OP_and: | |
895 | case DW_OP_div: | |
896 | case DW_OP_minus: | |
897 | case DW_OP_mod: | |
898 | case DW_OP_mul: | |
899 | case DW_OP_or: | |
900 | case DW_OP_plus: | |
901 | case DW_OP_shl: | |
902 | case DW_OP_shr: | |
903 | case DW_OP_shra: | |
904 | case DW_OP_xor: | |
905 | case DW_OP_le: | |
906 | case DW_OP_ge: | |
907 | case DW_OP_eq: | |
908 | case DW_OP_lt: | |
909 | case DW_OP_gt: | |
910 | case DW_OP_ne: | |
911 | { | |
f2c7657e | 912 | /* Binary operations. */ |
8a9b8146 | 913 | struct value *first, *second; |
4c2df51b DJ |
914 | |
915 | second = dwarf_expr_fetch (ctx, 0); | |
916 | dwarf_expr_pop (ctx); | |
917 | ||
b263358a | 918 | first = dwarf_expr_fetch (ctx, 0); |
4c2df51b DJ |
919 | dwarf_expr_pop (ctx); |
920 | ||
8a9b8146 TT |
921 | if (! base_types_equal_p (value_type (first), value_type (second))) |
922 | error (_("Incompatible types on DWARF stack")); | |
923 | ||
4c2df51b DJ |
924 | switch (op) |
925 | { | |
926 | case DW_OP_and: | |
8a9b8146 TT |
927 | dwarf_require_integral (value_type (first)); |
928 | dwarf_require_integral (value_type (second)); | |
929 | result_val = value_binop (first, second, BINOP_BITWISE_AND); | |
4c2df51b DJ |
930 | break; |
931 | case DW_OP_div: | |
8a9b8146 | 932 | result_val = value_binop (first, second, BINOP_DIV); |
99c87dab | 933 | break; |
4c2df51b | 934 | case DW_OP_minus: |
8a9b8146 | 935 | result_val = value_binop (first, second, BINOP_SUB); |
4c2df51b DJ |
936 | break; |
937 | case DW_OP_mod: | |
8a9b8146 TT |
938 | { |
939 | int cast_back = 0; | |
940 | struct type *orig_type = value_type (first); | |
941 | ||
942 | /* We have to special-case "old-style" untyped values | |
943 | -- these must have mod computed using unsigned | |
944 | math. */ | |
945 | if (orig_type == address_type) | |
946 | { | |
947 | struct type *utype | |
948 | = get_unsigned_type (ctx->gdbarch, orig_type); | |
949 | ||
950 | cast_back = 1; | |
951 | first = value_cast (utype, first); | |
952 | second = value_cast (utype, second); | |
953 | } | |
954 | /* Note that value_binop doesn't handle float or | |
955 | decimal float here. This seems unimportant. */ | |
956 | result_val = value_binop (first, second, BINOP_MOD); | |
957 | if (cast_back) | |
958 | result_val = value_cast (orig_type, result_val); | |
959 | } | |
4c2df51b DJ |
960 | break; |
961 | case DW_OP_mul: | |
8a9b8146 | 962 | result_val = value_binop (first, second, BINOP_MUL); |
4c2df51b DJ |
963 | break; |
964 | case DW_OP_or: | |
8a9b8146 TT |
965 | dwarf_require_integral (value_type (first)); |
966 | dwarf_require_integral (value_type (second)); | |
967 | result_val = value_binop (first, second, BINOP_BITWISE_IOR); | |
4c2df51b DJ |
968 | break; |
969 | case DW_OP_plus: | |
8a9b8146 | 970 | result_val = value_binop (first, second, BINOP_ADD); |
4c2df51b DJ |
971 | break; |
972 | case DW_OP_shl: | |
8a9b8146 TT |
973 | dwarf_require_integral (value_type (first)); |
974 | dwarf_require_integral (value_type (second)); | |
975 | result_val = value_binop (first, second, BINOP_LSH); | |
4c2df51b DJ |
976 | break; |
977 | case DW_OP_shr: | |
8a9b8146 TT |
978 | dwarf_require_integral (value_type (first)); |
979 | dwarf_require_integral (value_type (second)); | |
b087e0ed | 980 | if (!TYPE_UNSIGNED (value_type (first))) |
8a9b8146 TT |
981 | { |
982 | struct type *utype | |
983 | = get_unsigned_type (ctx->gdbarch, value_type (first)); | |
984 | ||
985 | first = value_cast (utype, first); | |
986 | } | |
987 | ||
988 | result_val = value_binop (first, second, BINOP_RSH); | |
989 | /* Make sure we wind up with the same type we started | |
990 | with. */ | |
991 | if (value_type (result_val) != value_type (second)) | |
992 | result_val = value_cast (value_type (second), result_val); | |
99c87dab | 993 | break; |
4c2df51b | 994 | case DW_OP_shra: |
8a9b8146 TT |
995 | dwarf_require_integral (value_type (first)); |
996 | dwarf_require_integral (value_type (second)); | |
997 | result_val = value_binop (first, second, BINOP_RSH); | |
4c2df51b DJ |
998 | break; |
999 | case DW_OP_xor: | |
8a9b8146 TT |
1000 | dwarf_require_integral (value_type (first)); |
1001 | dwarf_require_integral (value_type (second)); | |
1002 | result_val = value_binop (first, second, BINOP_BITWISE_XOR); | |
4c2df51b DJ |
1003 | break; |
1004 | case DW_OP_le: | |
8a9b8146 TT |
1005 | /* A <= B is !(B < A). */ |
1006 | result = ! value_less (second, first); | |
1007 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1008 | break; |
1009 | case DW_OP_ge: | |
8a9b8146 TT |
1010 | /* A >= B is !(A < B). */ |
1011 | result = ! value_less (first, second); | |
1012 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1013 | break; |
1014 | case DW_OP_eq: | |
8a9b8146 TT |
1015 | result = value_equal (first, second); |
1016 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1017 | break; |
1018 | case DW_OP_lt: | |
8a9b8146 TT |
1019 | result = value_less (first, second); |
1020 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1021 | break; |
1022 | case DW_OP_gt: | |
8a9b8146 TT |
1023 | /* A > B is B < A. */ |
1024 | result = value_less (second, first); | |
1025 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1026 | break; |
1027 | case DW_OP_ne: | |
8a9b8146 TT |
1028 | result = ! value_equal (first, second); |
1029 | result_val = value_from_ulongest (address_type, result); | |
4c2df51b DJ |
1030 | break; |
1031 | default: | |
1032 | internal_error (__FILE__, __LINE__, | |
e2e0b3e5 | 1033 | _("Can't be reached.")); |
4c2df51b | 1034 | } |
4c2df51b DJ |
1035 | } |
1036 | break; | |
1037 | ||
e7802207 TT |
1038 | case DW_OP_call_frame_cfa: |
1039 | result = (ctx->get_frame_cfa) (ctx->baton); | |
8a9b8146 | 1040 | result_val = value_from_ulongest (address_type, result); |
44353522 | 1041 | in_stack_memory = 1; |
e7802207 TT |
1042 | break; |
1043 | ||
4c2df51b | 1044 | case DW_OP_GNU_push_tls_address: |
c3228f12 EZ |
1045 | /* Variable is at a constant offset in the thread-local |
1046 | storage block into the objfile for the current thread and | |
0963b4bd | 1047 | the dynamic linker module containing this expression. Here |
c3228f12 EZ |
1048 | we return returns the offset from that base. The top of the |
1049 | stack has the offset from the beginning of the thread | |
1050 | control block at which the variable is located. Nothing | |
1051 | should follow this operator, so the top of stack would be | |
1052 | returned. */ | |
8a9b8146 | 1053 | result = value_as_long (dwarf_expr_fetch (ctx, 0)); |
4c2df51b DJ |
1054 | dwarf_expr_pop (ctx); |
1055 | result = (ctx->get_tls_address) (ctx->baton, result); | |
8a9b8146 | 1056 | result_val = value_from_ulongest (address_type, result); |
4c2df51b DJ |
1057 | break; |
1058 | ||
1059 | case DW_OP_skip: | |
e17a4113 | 1060 | offset = extract_signed_integer (op_ptr, 2, byte_order); |
4c2df51b DJ |
1061 | op_ptr += 2; |
1062 | op_ptr += offset; | |
1063 | goto no_push; | |
1064 | ||
1065 | case DW_OP_bra: | |
8a9b8146 TT |
1066 | { |
1067 | struct value *val; | |
1068 | ||
1069 | offset = extract_signed_integer (op_ptr, 2, byte_order); | |
1070 | op_ptr += 2; | |
1071 | val = dwarf_expr_fetch (ctx, 0); | |
1072 | dwarf_require_integral (value_type (val)); | |
1073 | if (value_as_long (val) != 0) | |
1074 | op_ptr += offset; | |
1075 | dwarf_expr_pop (ctx); | |
1076 | } | |
4c2df51b DJ |
1077 | goto no_push; |
1078 | ||
1079 | case DW_OP_nop: | |
1080 | goto no_push; | |
1081 | ||
87808bd6 JB |
1082 | case DW_OP_piece: |
1083 | { | |
1084 | ULONGEST size; | |
87808bd6 JB |
1085 | |
1086 | /* Record the piece. */ | |
1087 | op_ptr = read_uleb128 (op_ptr, op_end, &size); | |
d3b1e874 | 1088 | add_piece (ctx, 8 * size, 0); |
87808bd6 | 1089 | |
cec03d70 TT |
1090 | /* Pop off the address/regnum, and reset the location |
1091 | type. */ | |
cb826367 TT |
1092 | if (ctx->location != DWARF_VALUE_LITERAL |
1093 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
cec03d70 TT |
1094 | dwarf_expr_pop (ctx); |
1095 | ctx->location = DWARF_VALUE_MEMORY; | |
87808bd6 JB |
1096 | } |
1097 | goto no_push; | |
1098 | ||
d3b1e874 TT |
1099 | case DW_OP_bit_piece: |
1100 | { | |
1101 | ULONGEST size, offset; | |
1102 | ||
1103 | /* Record the piece. */ | |
1104 | op_ptr = read_uleb128 (op_ptr, op_end, &size); | |
1105 | op_ptr = read_uleb128 (op_ptr, op_end, &offset); | |
1106 | add_piece (ctx, size, offset); | |
1107 | ||
1108 | /* Pop off the address/regnum, and reset the location | |
1109 | type. */ | |
1110 | if (ctx->location != DWARF_VALUE_LITERAL | |
1111 | && ctx->location != DWARF_VALUE_OPTIMIZED_OUT) | |
1112 | dwarf_expr_pop (ctx); | |
1113 | ctx->location = DWARF_VALUE_MEMORY; | |
1114 | } | |
1115 | goto no_push; | |
1116 | ||
42be36b3 CT |
1117 | case DW_OP_GNU_uninit: |
1118 | if (op_ptr != op_end) | |
9c482037 | 1119 | error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always " |
42be36b3 CT |
1120 | "be the very last op.")); |
1121 | ||
1122 | ctx->initialized = 0; | |
1123 | goto no_push; | |
1124 | ||
5c631832 JK |
1125 | case DW_OP_call2: |
1126 | result = extract_unsigned_integer (op_ptr, 2, byte_order); | |
1127 | op_ptr += 2; | |
1128 | ctx->dwarf_call (ctx, result); | |
1129 | goto no_push; | |
1130 | ||
1131 | case DW_OP_call4: | |
1132 | result = extract_unsigned_integer (op_ptr, 4, byte_order); | |
1133 | op_ptr += 4; | |
1134 | ctx->dwarf_call (ctx, result); | |
1135 | goto no_push; | |
dd90784c JK |
1136 | |
1137 | case DW_OP_GNU_entry_value: | |
1138 | /* This operation is not yet supported by GDB. */ | |
1139 | ctx->location = DWARF_VALUE_OPTIMIZED_OUT; | |
1140 | ctx->stack_len = 0; | |
1141 | ctx->num_pieces = 0; | |
1142 | goto abort_expression; | |
5c631832 | 1143 | |
8a9b8146 TT |
1144 | case DW_OP_GNU_const_type: |
1145 | { | |
1146 | ULONGEST type_die; | |
1147 | int n; | |
1148 | const gdb_byte *data; | |
1149 | struct type *type; | |
1150 | ||
1151 | op_ptr = read_uleb128 (op_ptr, op_end, &type_die); | |
1152 | n = *op_ptr++; | |
1153 | data = op_ptr; | |
1154 | op_ptr += n; | |
1155 | ||
1156 | type = dwarf_get_base_type (ctx, type_die, n); | |
1157 | result_val = value_from_contents (type, data); | |
1158 | } | |
1159 | break; | |
1160 | ||
1161 | case DW_OP_GNU_regval_type: | |
1162 | { | |
1163 | ULONGEST type_die; | |
1164 | struct type *type; | |
1165 | ||
1166 | op_ptr = read_uleb128 (op_ptr, op_end, ®); | |
1167 | op_ptr = read_uleb128 (op_ptr, op_end, &type_die); | |
1168 | ||
1169 | type = dwarf_get_base_type (ctx, type_die, 0); | |
1170 | result = (ctx->read_reg) (ctx->baton, reg); | |
1171 | result_val = value_from_ulongest (type, result); | |
1172 | } | |
1173 | break; | |
1174 | ||
1175 | case DW_OP_GNU_convert: | |
1176 | case DW_OP_GNU_reinterpret: | |
1177 | { | |
1178 | ULONGEST type_die; | |
1179 | struct type *type; | |
1180 | ||
1181 | op_ptr = read_uleb128 (op_ptr, op_end, &type_die); | |
1182 | ||
1183 | type = dwarf_get_base_type (ctx, type_die, 0); | |
1184 | ||
1185 | result_val = dwarf_expr_fetch (ctx, 0); | |
1186 | dwarf_expr_pop (ctx); | |
1187 | ||
1188 | if (op == DW_OP_GNU_convert) | |
1189 | result_val = value_cast (type, result_val); | |
1190 | else if (type == value_type (result_val)) | |
1191 | { | |
1192 | /* Nothing. */ | |
1193 | } | |
1194 | else if (TYPE_LENGTH (type) | |
1195 | != TYPE_LENGTH (value_type (result_val))) | |
1196 | error (_("DW_OP_GNU_reinterpret has wrong size")); | |
1197 | else | |
1198 | result_val | |
1199 | = value_from_contents (type, | |
1200 | value_contents_all (result_val)); | |
1201 | } | |
1202 | break; | |
1203 | ||
4c2df51b | 1204 | default: |
8a3fe4f8 | 1205 | error (_("Unhandled dwarf expression opcode 0x%x"), op); |
4c2df51b DJ |
1206 | } |
1207 | ||
1208 | /* Most things push a result value. */ | |
8a9b8146 TT |
1209 | gdb_assert (result_val != NULL); |
1210 | dwarf_expr_push (ctx, result_val, in_stack_memory); | |
82ae4854 | 1211 | no_push: |
b27cf2b3 | 1212 | ; |
4c2df51b | 1213 | } |
1e3a102a | 1214 | |
8cf6f0b1 TT |
1215 | /* To simplify our main caller, if the result is an implicit |
1216 | pointer, then make a pieced value. This is ok because we can't | |
1217 | have implicit pointers in contexts where pieces are invalid. */ | |
1218 | if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER) | |
1219 | add_piece (ctx, 8 * ctx->addr_size, 0); | |
1220 | ||
dd90784c | 1221 | abort_expression: |
1e3a102a JK |
1222 | ctx->recursion_depth--; |
1223 | gdb_assert (ctx->recursion_depth >= 0); | |
8a9b8146 TT |
1224 | } |
1225 | ||
/* Register dwarf_gdbarch_types_init (defined earlier in this file) as
   post-init per-architecture data; the returned cookie is stored in
   dwarf_arch_cookie so execute_stack_op can later look up the cached
   dw_types for a gdbarch.
   NOTE(review): by GDB convention, _initialize_* functions are
   discovered by the build system and invoked once at startup -- confirm
   against the generated init.c.  */
1226 | void
1227 | _initialize_dwarf2expr (void)
1228 | {
1229 | dwarf_arch_cookie
1230 | = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
4c2df51b | 1231 | }