Initial support for Fission. http://gcc.gnu.org/wiki/DebugFission
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2003, 2005, 2007-2012 Free Software Foundation,
4 Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 const gdb_byte *, const gdb_byte *);
36
37 /* Cookie for gdbarch data. */
38
39 static struct gdbarch_data *dwarf_arch_cookie;
40
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Cache of the signed "DWARF address" types for address sizes of
     2, 4 and 8 bytes (indices 0, 1 and 2 respectively).  Slots start
     out NULL and are filled lazily by dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
48
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53 {
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60 }
61
62 /* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
64 supported. */
65
66 static struct type *
67 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
68 {
69 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 struct dwarf_expr_context *
95 new_dwarf_expr_context (void)
96 {
97 struct dwarf_expr_context *retval;
98
99 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = xmalloc (retval->stack_allocated
103 * sizeof (struct dwarf_stack_value));
104 retval->num_pieces = 0;
105 retval->pieces = 0;
106 retval->max_recursion_depth = 0x100;
107 return retval;
108 }
109
/* Release the memory allocated to CTX, including the value stack and
   piece list it owns.  CTX must not be used afterwards.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
119
/* Helper for make_cleanup_free_dwarf_expr_context: adapts
   free_dwarf_expr_context to the void* signature required by the
   cleanup machinery.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}
127
/* Return a cleanup that calls free_dwarf_expr_context on CTX when
   run.  Ownership of CTX effectively passes to the cleanup chain.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
135
136 /* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
138
139 static void
140 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
141 {
142 if (ctx->stack_len + need > ctx->stack_allocated)
143 {
144 size_t newlen = ctx->stack_len + need + 10;
145
146 ctx->stack = xrealloc (ctx->stack,
147 newlen * sizeof (struct dwarf_stack_value));
148 ctx->stack_allocated = newlen;
149 }
150 }
151
152 /* Push VALUE onto CTX's stack. */
153
154 static void
155 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
156 int in_stack_memory)
157 {
158 struct dwarf_stack_value *v;
159
160 dwarf_expr_grow_stack (ctx, 1);
161 v = &ctx->stack[ctx->stack_len++];
162 v->value = value;
163 v->in_stack_memory = in_stack_memory;
164 }
165
166 /* Push VALUE onto CTX's stack. */
167
168 void
169 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
170 int in_stack_memory)
171 {
172 dwarf_expr_push (ctx,
173 value_from_ulongest (dwarf_expr_address_type (ctx), value),
174 in_stack_memory);
175 }
176
177 /* Pop the top item off of CTX's stack. */
178
179 static void
180 dwarf_expr_pop (struct dwarf_expr_context *ctx)
181 {
182 if (ctx->stack_len <= 0)
183 error (_("dwarf expression stack underflow"));
184 ctx->stack_len--;
185 }
186
187 /* Retrieve the N'th item on CTX's stack. */
188
189 struct value *
190 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
191 {
192 if (ctx->stack_len <= n)
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
195 n, ctx->stack_len);
196 return ctx->stack[ctx->stack_len - (1 + n)].value;
197 }
198
199 /* Require that TYPE be an integral type; throw an exception if not. */
200
201 static void
202 dwarf_require_integral (struct type *type)
203 {
204 if (TYPE_CODE (type) != TYPE_CODE_INT
205 && TYPE_CODE (type) != TYPE_CODE_CHAR
206 && TYPE_CODE (type) != TYPE_CODE_BOOL)
207 error (_("integral type expected in DWARF expression"));
208 }
209
210 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 type. */
212
213 static struct type *
214 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
215 {
216 switch (TYPE_LENGTH (type))
217 {
218 case 1:
219 return builtin_type (gdbarch)->builtin_uint8;
220 case 2:
221 return builtin_type (gdbarch)->builtin_uint16;
222 case 4:
223 return builtin_type (gdbarch)->builtin_uint32;
224 case 8:
225 return builtin_type (gdbarch)->builtin_uint64;
226 default:
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
229 }
230 }
231
232 /* Return the signed form of TYPE. TYPE is necessarily an integral
233 type. */
234
235 static struct type *
236 get_signed_type (struct gdbarch *gdbarch, struct type *type)
237 {
238 switch (TYPE_LENGTH (type))
239 {
240 case 1:
241 return builtin_type (gdbarch)->builtin_int8;
242 case 2:
243 return builtin_type (gdbarch)->builtin_int16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_int32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_int64;
248 default:
249 error (_("no signed variant found for type, while evaluating "
250 "DWARF expression"));
251 }
252 }
253
/* Retrieve the N'th item on CTX's stack, converted to an address.
   The item must be of integral type; its raw bytes are read as an
   unsigned integer in the target's byte order.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      /* Round-trip the value through a raw buffer in target byte
	 order so the architecture hook can reinterpret it.  */
      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
286
287 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
288
289 int
290 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
291 {
292 if (ctx->stack_len <= n)
293 error (_("Asked for position %d of stack, "
294 "stack only has %d elements on it."),
295 n, ctx->stack_len);
296 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
297 }
298
/* Return non-zero if CTX's expression stack currently holds no
   elements.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}
306
/* Add a new piece to CTX's piece list.  SIZE is the piece's size and
   OFFSET its offset within the containing location (units -- bytes vs
   bits -- depend on which DW_OP_*piece op the caller is handling;
   confirm against the evaluator).  The piece's location kind comes
   from CTX's current state and, for most kinds, its payload comes
   from the top of the value stack.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  /* Grow the piece array by one and point P at the new slot.  */
  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry their bytes directly, taken from the
	 data recorded by the DW_OP_implicit_value handler.  */
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means there is no value for this piece: treat
	 it as optimized out.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      /* Memory pieces record the address (and its stack-memory
	 classification) from the top of the stack.  */
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len holds the referred-to DIE's cu_offset, stashed there
	 by the DW_OP_GNU_implicit_pointer handler; the byte offset is
	 on the stack.  */
      p->v.ptr.die.cu_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    /* Register pieces record the DWARF register number.  */
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* Remaining kinds (e.g. stack values) keep the value itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
355
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  On return the result is left in CTX's stack and/or piece
   list.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
371
372 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
373 by R, and return the new value of BUF. Verify that it doesn't extend
374 past BUF_END. R can be NULL, the constant is then only skipped. */
375
376 const gdb_byte *
377 read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
378 {
379 unsigned shift = 0;
380 ULONGEST result = 0;
381 gdb_byte byte;
382
383 while (1)
384 {
385 if (buf >= buf_end)
386 error (_("read_uleb128: Corrupted DWARF expression."));
387
388 byte = *buf++;
389 result |= ((ULONGEST) (byte & 0x7f)) << shift;
390 if ((byte & 0x80) == 0)
391 break;
392 shift += 7;
393 }
394 if (r)
395 *r = result;
396 return buf;
397 }
398
399 /* Decode the signed LEB128 constant at BUF into the variable pointed to
400 by R, and return the new value of BUF. Verify that it doesn't extend
401 past BUF_END. R can be NULL, the constant is then only skipped. */
402
403 const gdb_byte *
404 read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
405 {
406 unsigned shift = 0;
407 LONGEST result = 0;
408 gdb_byte byte;
409
410 while (1)
411 {
412 if (buf >= buf_end)
413 error (_("read_sleb128: Corrupted DWARF expression."));
414
415 byte = *buf++;
416 result |= ((ULONGEST) (byte & 0x7f)) << shift;
417 shift += 7;
418 if ((byte & 0x80) == 0)
419 break;
420 }
421 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
422 result |= -(((LONGEST) 1) << shift);
423
424 if (r)
425 *r = result;
426 return buf;
427 }
428 \f
429
430 /* Check that the current operator is either at the end of an
431 expression, or that it is followed by a composition operator. */
432
433 void
434 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
435 const char *op_name)
436 {
437 /* It seems like DW_OP_GNU_uninit should be handled here. However,
438 it doesn't seem to make sense for DW_OP_*_value, and it was not
439 checked at the other place that this function is called. */
440 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
441 error (_("DWARF-2 expression error: `%s' operations must be "
442 "used either alone or in conjunction with DW_OP_piece "
443 "or DW_OP_bit_piece."),
444 op_name);
445 }
446
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types: type code, signedness and length.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
460
461 /* A convenience function to call get_base_type on CTX and return the
462 result. DIE is the DIE whose type we need. SIZE is non-zero if
463 this function should verify that the resulting type has the correct
464 size. */
465
466 static struct type *
467 dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
468 {
469 struct type *result;
470
471 if (ctx->funcs->get_base_type)
472 {
473 result = ctx->funcs->get_base_type (ctx, die);
474 if (result == NULL)
475 error (_("Could not find type for DW_OP_GNU_const_type"));
476 if (size != 0 && TYPE_LENGTH (result) != size)
477 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
478 }
479 else
480 /* Anything will do. */
481 result = builtin_type (ctx->gdbarch)->builtin_int;
482
483 return result;
484 }
485
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  ULONGEST dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* Single-byte DW_OP_reg0..DW_OP_reg31; any trailing bytes
	 disqualify the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      /* Register number followed by a type DIE offset; the type
	 operand is skipped here.  */
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      buf = read_uleb128 (buf, buf_end, NULL);
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }
  else
    return -1;
  /* Reject trailing bytes, and register numbers that do not survive a
     round-trip through the int return type.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
520
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
				CORE_ADDR *deref_size_return)
{
  ULONGEST dwarf_reg;
  LONGEST offset;

  if (buf_end <= buf)
    return -1;
  /* First operand: the base register, either a short-form breg or
     DW_OP_bregx.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      /* Reject register numbers that do not fit the int return
	 type.  */
      if ((int) dwarf_reg != dwarf_reg)
	return -1;
    }
  else
    return -1;

  /* Only a zero displacement matches the DW_OP_breg*(0) pattern.  */
  buf = read_sleb128 (buf, buf_end, &offset);
  if (offset != 0)
    return -1;

  if (buf >= buf_end)
    return -1;

  /* Second operand: the dereference, plain or sized.  */
  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
	return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  /* Anything after the dereference disqualifies the block.  */
  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
577
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  LONGEST fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = read_sleb128 (buf, buf_end, &fb_offset);
  *fb_offset_return = fb_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through the CORE_ADDR output parameter.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
601
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  ULONGEST dwarf_reg;
  LONGEST sp_offset;

  if (buf_end <= buf)
    return 0;
  /* Parse the base register, either a short-form breg or
     DW_OP_bregx.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }

  /* Only the architecture's stack pointer register matches.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = read_sleb128 (buf, buf_end, &sp_offset);
  *sp_offset_return = sp_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through the CORE_ADDR output parameter.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
639
640 /* The engine for the expression evaluator. Using the context in CTX,
641 evaluate the expression between OP_PTR and OP_END. */
642
643 static void
644 execute_stack_op (struct dwarf_expr_context *ctx,
645 const gdb_byte *op_ptr, const gdb_byte *op_end)
646 {
647 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
648 /* Old-style "untyped" DWARF values need special treatment in a
649 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
650 a special type for these values so we can distinguish them from
651 values that have an explicit type, because explicitly-typed
652 values do not need special treatment. This special type must be
653 different (in the `==' sense) from any base type coming from the
654 CU. */
655 struct type *address_type = dwarf_expr_address_type (ctx);
656
657 ctx->location = DWARF_VALUE_MEMORY;
658 ctx->initialized = 1; /* Default is initialized. */
659
660 if (ctx->recursion_depth > ctx->max_recursion_depth)
661 error (_("DWARF-2 expression error: Loop detected (%d)."),
662 ctx->recursion_depth);
663 ctx->recursion_depth++;
664
665 while (op_ptr < op_end)
666 {
667 enum dwarf_location_atom op = *op_ptr++;
668 ULONGEST result;
669 /* Assume the value is not in stack memory.
670 Code that knows otherwise sets this to 1.
671 Some arithmetic on stack addresses can probably be assumed to still
672 be a stack address, but we skip this complication for now.
673 This is just an optimization, so it's always ok to punt
674 and leave this as 0. */
675 int in_stack_memory = 0;
676 ULONGEST uoffset, reg;
677 LONGEST offset;
678 struct value *result_val = NULL;
679
680 /* The DWARF expression might have a bug causing an infinite
681 loop. In that case, quitting is the only way out. */
682 QUIT;
683
684 switch (op)
685 {
686 case DW_OP_lit0:
687 case DW_OP_lit1:
688 case DW_OP_lit2:
689 case DW_OP_lit3:
690 case DW_OP_lit4:
691 case DW_OP_lit5:
692 case DW_OP_lit6:
693 case DW_OP_lit7:
694 case DW_OP_lit8:
695 case DW_OP_lit9:
696 case DW_OP_lit10:
697 case DW_OP_lit11:
698 case DW_OP_lit12:
699 case DW_OP_lit13:
700 case DW_OP_lit14:
701 case DW_OP_lit15:
702 case DW_OP_lit16:
703 case DW_OP_lit17:
704 case DW_OP_lit18:
705 case DW_OP_lit19:
706 case DW_OP_lit20:
707 case DW_OP_lit21:
708 case DW_OP_lit22:
709 case DW_OP_lit23:
710 case DW_OP_lit24:
711 case DW_OP_lit25:
712 case DW_OP_lit26:
713 case DW_OP_lit27:
714 case DW_OP_lit28:
715 case DW_OP_lit29:
716 case DW_OP_lit30:
717 case DW_OP_lit31:
718 result = op - DW_OP_lit0;
719 result_val = value_from_ulongest (address_type, result);
720 break;
721
722 case DW_OP_addr:
723 result = extract_unsigned_integer (op_ptr,
724 ctx->addr_size, byte_order);
725 op_ptr += ctx->addr_size;
726 /* Some versions of GCC emit DW_OP_addr before
727 DW_OP_GNU_push_tls_address. In this case the value is an
728 index, not an address. We don't support things like
729 branching between the address and the TLS op. */
730 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
731 result += ctx->offset;
732 result_val = value_from_ulongest (address_type, result);
733 break;
734
735 case DW_OP_GNU_addr_index:
736 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
737 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
738 result_val = value_from_ulongest (address_type, result);
739 break;
740
741 case DW_OP_const1u:
742 result = extract_unsigned_integer (op_ptr, 1, byte_order);
743 result_val = value_from_ulongest (address_type, result);
744 op_ptr += 1;
745 break;
746 case DW_OP_const1s:
747 result = extract_signed_integer (op_ptr, 1, byte_order);
748 result_val = value_from_ulongest (address_type, result);
749 op_ptr += 1;
750 break;
751 case DW_OP_const2u:
752 result = extract_unsigned_integer (op_ptr, 2, byte_order);
753 result_val = value_from_ulongest (address_type, result);
754 op_ptr += 2;
755 break;
756 case DW_OP_const2s:
757 result = extract_signed_integer (op_ptr, 2, byte_order);
758 result_val = value_from_ulongest (address_type, result);
759 op_ptr += 2;
760 break;
761 case DW_OP_const4u:
762 result = extract_unsigned_integer (op_ptr, 4, byte_order);
763 result_val = value_from_ulongest (address_type, result);
764 op_ptr += 4;
765 break;
766 case DW_OP_const4s:
767 result = extract_signed_integer (op_ptr, 4, byte_order);
768 result_val = value_from_ulongest (address_type, result);
769 op_ptr += 4;
770 break;
771 case DW_OP_const8u:
772 result = extract_unsigned_integer (op_ptr, 8, byte_order);
773 result_val = value_from_ulongest (address_type, result);
774 op_ptr += 8;
775 break;
776 case DW_OP_const8s:
777 result = extract_signed_integer (op_ptr, 8, byte_order);
778 result_val = value_from_ulongest (address_type, result);
779 op_ptr += 8;
780 break;
781 case DW_OP_constu:
782 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
783 result = uoffset;
784 result_val = value_from_ulongest (address_type, result);
785 break;
786 case DW_OP_consts:
787 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
788 result = offset;
789 result_val = value_from_ulongest (address_type, result);
790 break;
791
792 /* The DW_OP_reg operations are required to occur alone in
793 location expressions. */
794 case DW_OP_reg0:
795 case DW_OP_reg1:
796 case DW_OP_reg2:
797 case DW_OP_reg3:
798 case DW_OP_reg4:
799 case DW_OP_reg5:
800 case DW_OP_reg6:
801 case DW_OP_reg7:
802 case DW_OP_reg8:
803 case DW_OP_reg9:
804 case DW_OP_reg10:
805 case DW_OP_reg11:
806 case DW_OP_reg12:
807 case DW_OP_reg13:
808 case DW_OP_reg14:
809 case DW_OP_reg15:
810 case DW_OP_reg16:
811 case DW_OP_reg17:
812 case DW_OP_reg18:
813 case DW_OP_reg19:
814 case DW_OP_reg20:
815 case DW_OP_reg21:
816 case DW_OP_reg22:
817 case DW_OP_reg23:
818 case DW_OP_reg24:
819 case DW_OP_reg25:
820 case DW_OP_reg26:
821 case DW_OP_reg27:
822 case DW_OP_reg28:
823 case DW_OP_reg29:
824 case DW_OP_reg30:
825 case DW_OP_reg31:
826 if (op_ptr != op_end
827 && *op_ptr != DW_OP_piece
828 && *op_ptr != DW_OP_bit_piece
829 && *op_ptr != DW_OP_GNU_uninit)
830 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
831 "used either alone or in conjunction with DW_OP_piece "
832 "or DW_OP_bit_piece."));
833
834 result = op - DW_OP_reg0;
835 result_val = value_from_ulongest (address_type, result);
836 ctx->location = DWARF_VALUE_REGISTER;
837 break;
838
839 case DW_OP_regx:
840 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
841 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
842
843 result = reg;
844 result_val = value_from_ulongest (address_type, result);
845 ctx->location = DWARF_VALUE_REGISTER;
846 break;
847
848 case DW_OP_implicit_value:
849 {
850 ULONGEST len;
851
852 op_ptr = read_uleb128 (op_ptr, op_end, &len);
853 if (op_ptr + len > op_end)
854 error (_("DW_OP_implicit_value: too few bytes available."));
855 ctx->len = len;
856 ctx->data = op_ptr;
857 ctx->location = DWARF_VALUE_LITERAL;
858 op_ptr += len;
859 dwarf_expr_require_composition (op_ptr, op_end,
860 "DW_OP_implicit_value");
861 }
862 goto no_push;
863
864 case DW_OP_stack_value:
865 ctx->location = DWARF_VALUE_STACK;
866 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
867 goto no_push;
868
869 case DW_OP_GNU_implicit_pointer:
870 {
871 ULONGEST die;
872 LONGEST len;
873
874 if (ctx->ref_addr_size == -1)
875 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
876 "is not allowed in frame context"));
877
878 /* The referred-to DIE of cu_offset kind. */
879 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
880 byte_order);
881 op_ptr += ctx->ref_addr_size;
882
883 /* The byte offset into the data. */
884 op_ptr = read_sleb128 (op_ptr, op_end, &len);
885 result = (ULONGEST) len;
886 result_val = value_from_ulongest (address_type, result);
887
888 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
889 dwarf_expr_require_composition (op_ptr, op_end,
890 "DW_OP_GNU_implicit_pointer");
891 }
892 break;
893
894 case DW_OP_breg0:
895 case DW_OP_breg1:
896 case DW_OP_breg2:
897 case DW_OP_breg3:
898 case DW_OP_breg4:
899 case DW_OP_breg5:
900 case DW_OP_breg6:
901 case DW_OP_breg7:
902 case DW_OP_breg8:
903 case DW_OP_breg9:
904 case DW_OP_breg10:
905 case DW_OP_breg11:
906 case DW_OP_breg12:
907 case DW_OP_breg13:
908 case DW_OP_breg14:
909 case DW_OP_breg15:
910 case DW_OP_breg16:
911 case DW_OP_breg17:
912 case DW_OP_breg18:
913 case DW_OP_breg19:
914 case DW_OP_breg20:
915 case DW_OP_breg21:
916 case DW_OP_breg22:
917 case DW_OP_breg23:
918 case DW_OP_breg24:
919 case DW_OP_breg25:
920 case DW_OP_breg26:
921 case DW_OP_breg27:
922 case DW_OP_breg28:
923 case DW_OP_breg29:
924 case DW_OP_breg30:
925 case DW_OP_breg31:
926 {
927 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
928 result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
929 result += offset;
930 result_val = value_from_ulongest (address_type, result);
931 }
932 break;
933 case DW_OP_bregx:
934 {
935 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
936 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
937 result = (ctx->funcs->read_reg) (ctx->baton, reg);
938 result += offset;
939 result_val = value_from_ulongest (address_type, result);
940 }
941 break;
942 case DW_OP_fbreg:
943 {
944 const gdb_byte *datastart;
945 size_t datalen;
946 unsigned int before_stack_len;
947
948 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
949 /* Rather than create a whole new context, we simply
950 record the stack length before execution, then reset it
951 afterwards, effectively erasing whatever the recursive
952 call put there. */
953 before_stack_len = ctx->stack_len;
954 /* FIXME: cagney/2003-03-26: This code should be using
955 get_frame_base_address(), and then implement a dwarf2
956 specific this_base method. */
957 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
958 dwarf_expr_eval (ctx, datastart, datalen);
959 if (ctx->location == DWARF_VALUE_MEMORY)
960 result = dwarf_expr_fetch_address (ctx, 0);
961 else if (ctx->location == DWARF_VALUE_REGISTER)
962 result = (ctx->funcs->read_reg) (ctx->baton,
963 value_as_long (dwarf_expr_fetch (ctx, 0)));
964 else
965 error (_("Not implemented: computing frame "
966 "base using explicit value operator"));
967 result = result + offset;
968 result_val = value_from_ulongest (address_type, result);
969 in_stack_memory = 1;
970 ctx->stack_len = before_stack_len;
971 ctx->location = DWARF_VALUE_MEMORY;
972 }
973 break;
974
975 case DW_OP_dup:
976 result_val = dwarf_expr_fetch (ctx, 0);
977 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
978 break;
979
980 case DW_OP_drop:
981 dwarf_expr_pop (ctx);
982 goto no_push;
983
984 case DW_OP_pick:
985 offset = *op_ptr++;
986 result_val = dwarf_expr_fetch (ctx, offset);
987 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
988 break;
989
990 case DW_OP_swap:
991 {
992 struct dwarf_stack_value t1, t2;
993
994 if (ctx->stack_len < 2)
995 error (_("Not enough elements for "
996 "DW_OP_swap. Need 2, have %d."),
997 ctx->stack_len);
998 t1 = ctx->stack[ctx->stack_len - 1];
999 t2 = ctx->stack[ctx->stack_len - 2];
1000 ctx->stack[ctx->stack_len - 1] = t2;
1001 ctx->stack[ctx->stack_len - 2] = t1;
1002 goto no_push;
1003 }
1004
1005 case DW_OP_over:
1006 result_val = dwarf_expr_fetch (ctx, 1);
1007 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
1008 break;
1009
1010 case DW_OP_rot:
1011 {
1012 struct dwarf_stack_value t1, t2, t3;
1013
1014 if (ctx->stack_len < 3)
1015 error (_("Not enough elements for "
1016 "DW_OP_rot. Need 3, have %d."),
1017 ctx->stack_len);
1018 t1 = ctx->stack[ctx->stack_len - 1];
1019 t2 = ctx->stack[ctx->stack_len - 2];
1020 t3 = ctx->stack[ctx->stack_len - 3];
1021 ctx->stack[ctx->stack_len - 1] = t2;
1022 ctx->stack[ctx->stack_len - 2] = t3;
1023 ctx->stack[ctx->stack_len - 3] = t1;
1024 goto no_push;
1025 }
1026
1027 case DW_OP_deref:
1028 case DW_OP_deref_size:
1029 case DW_OP_GNU_deref_type:
1030 {
1031 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1032 gdb_byte *buf = alloca (addr_size);
1033 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1034 struct type *type;
1035
1036 dwarf_expr_pop (ctx);
1037
1038 if (op == DW_OP_GNU_deref_type)
1039 {
1040 cu_offset type_die;
1041
1042 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
1043 type_die.cu_off = uoffset;
1044 type = dwarf_get_base_type (ctx, type_die, 0);
1045 }
1046 else
1047 type = address_type;
1048
1049 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1050
1051 /* If the size of the object read from memory is different
1052 from the type length, we need to zero-extend it. */
1053 if (TYPE_LENGTH (type) != addr_size)
1054 {
1055 ULONGEST result =
1056 extract_unsigned_integer (buf, addr_size, byte_order);
1057
1058 buf = alloca (TYPE_LENGTH (type));
1059 store_unsigned_integer (buf, TYPE_LENGTH (type),
1060 byte_order, result);
1061 }
1062
1063 result_val = value_from_contents_and_address (type, buf, addr);
1064 break;
1065 }
1066
1067 case DW_OP_abs:
1068 case DW_OP_neg:
1069 case DW_OP_not:
1070 case DW_OP_plus_uconst:
1071 {
1072 /* Unary operations. */
1073 result_val = dwarf_expr_fetch (ctx, 0);
1074 dwarf_expr_pop (ctx);
1075
1076 switch (op)
1077 {
1078 case DW_OP_abs:
1079 if (value_less (result_val,
1080 value_zero (value_type (result_val), not_lval)))
1081 result_val = value_neg (result_val);
1082 break;
1083 case DW_OP_neg:
1084 result_val = value_neg (result_val);
1085 break;
1086 case DW_OP_not:
1087 dwarf_require_integral (value_type (result_val));
1088 result_val = value_complement (result_val);
1089 break;
1090 case DW_OP_plus_uconst:
1091 dwarf_require_integral (value_type (result_val));
1092 result = value_as_long (result_val);
1093 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
1094 result += reg;
1095 result_val = value_from_ulongest (address_type, result);
1096 break;
1097 }
1098 }
1099 break;
1100
1101 case DW_OP_and:
1102 case DW_OP_div:
1103 case DW_OP_minus:
1104 case DW_OP_mod:
1105 case DW_OP_mul:
1106 case DW_OP_or:
1107 case DW_OP_plus:
1108 case DW_OP_shl:
1109 case DW_OP_shr:
1110 case DW_OP_shra:
1111 case DW_OP_xor:
1112 case DW_OP_le:
1113 case DW_OP_ge:
1114 case DW_OP_eq:
1115 case DW_OP_lt:
1116 case DW_OP_gt:
1117 case DW_OP_ne:
1118 {
1119 /* Binary operations. */
1120 struct value *first, *second;
1121
1122 second = dwarf_expr_fetch (ctx, 0);
1123 dwarf_expr_pop (ctx);
1124
1125 first = dwarf_expr_fetch (ctx, 0);
1126 dwarf_expr_pop (ctx);
1127
1128 if (! base_types_equal_p (value_type (first), value_type (second)))
1129 error (_("Incompatible types on DWARF stack"));
1130
1131 switch (op)
1132 {
1133 case DW_OP_and:
1134 dwarf_require_integral (value_type (first));
1135 dwarf_require_integral (value_type (second));
1136 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1137 break;
1138 case DW_OP_div:
1139 result_val = value_binop (first, second, BINOP_DIV);
1140 break;
1141 case DW_OP_minus:
1142 result_val = value_binop (first, second, BINOP_SUB);
1143 break;
1144 case DW_OP_mod:
1145 {
1146 int cast_back = 0;
1147 struct type *orig_type = value_type (first);
1148
1149 /* We have to special-case "old-style" untyped values
1150 -- these must have mod computed using unsigned
1151 math. */
1152 if (orig_type == address_type)
1153 {
1154 struct type *utype
1155 = get_unsigned_type (ctx->gdbarch, orig_type);
1156
1157 cast_back = 1;
1158 first = value_cast (utype, first);
1159 second = value_cast (utype, second);
1160 }
1161 /* Note that value_binop doesn't handle float or
1162 decimal float here. This seems unimportant. */
1163 result_val = value_binop (first, second, BINOP_MOD);
1164 if (cast_back)
1165 result_val = value_cast (orig_type, result_val);
1166 }
1167 break;
1168 case DW_OP_mul:
1169 result_val = value_binop (first, second, BINOP_MUL);
1170 break;
1171 case DW_OP_or:
1172 dwarf_require_integral (value_type (first));
1173 dwarf_require_integral (value_type (second));
1174 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1175 break;
1176 case DW_OP_plus:
1177 result_val = value_binop (first, second, BINOP_ADD);
1178 break;
1179 case DW_OP_shl:
1180 dwarf_require_integral (value_type (first));
1181 dwarf_require_integral (value_type (second));
1182 result_val = value_binop (first, second, BINOP_LSH);
1183 break;
1184 case DW_OP_shr:
1185 dwarf_require_integral (value_type (first));
1186 dwarf_require_integral (value_type (second));
1187 if (!TYPE_UNSIGNED (value_type (first)))
1188 {
1189 struct type *utype
1190 = get_unsigned_type (ctx->gdbarch, value_type (first));
1191
1192 first = value_cast (utype, first);
1193 }
1194
1195 result_val = value_binop (first, second, BINOP_RSH);
1196 /* Make sure we wind up with the same type we started
1197 with. */
1198 if (value_type (result_val) != value_type (second))
1199 result_val = value_cast (value_type (second), result_val);
1200 break;
1201 case DW_OP_shra:
1202 dwarf_require_integral (value_type (first));
1203 dwarf_require_integral (value_type (second));
1204 if (TYPE_UNSIGNED (value_type (first)))
1205 {
1206 struct type *stype
1207 = get_signed_type (ctx->gdbarch, value_type (first));
1208
1209 first = value_cast (stype, first);
1210 }
1211
1212 result_val = value_binop (first, second, BINOP_RSH);
1213 /* Make sure we wind up with the same type we started
1214 with. */
1215 if (value_type (result_val) != value_type (second))
1216 result_val = value_cast (value_type (second), result_val);
1217 break;
1218 case DW_OP_xor:
1219 dwarf_require_integral (value_type (first));
1220 dwarf_require_integral (value_type (second));
1221 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1222 break;
1223 case DW_OP_le:
1224 /* A <= B is !(B < A). */
1225 result = ! value_less (second, first);
1226 result_val = value_from_ulongest (address_type, result);
1227 break;
1228 case DW_OP_ge:
1229 /* A >= B is !(A < B). */
1230 result = ! value_less (first, second);
1231 result_val = value_from_ulongest (address_type, result);
1232 break;
1233 case DW_OP_eq:
1234 result = value_equal (first, second);
1235 result_val = value_from_ulongest (address_type, result);
1236 break;
1237 case DW_OP_lt:
1238 result = value_less (first, second);
1239 result_val = value_from_ulongest (address_type, result);
1240 break;
1241 case DW_OP_gt:
1242 /* A > B is B < A. */
1243 result = value_less (second, first);
1244 result_val = value_from_ulongest (address_type, result);
1245 break;
1246 case DW_OP_ne:
1247 result = ! value_equal (first, second);
1248 result_val = value_from_ulongest (address_type, result);
1249 break;
1250 default:
1251 internal_error (__FILE__, __LINE__,
1252 _("Can't be reached."));
1253 }
1254 }
1255 break;
1256
1257 case DW_OP_call_frame_cfa:
1258 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1259 result_val = value_from_ulongest (address_type, result);
1260 in_stack_memory = 1;
1261 break;
1262
1263 case DW_OP_GNU_push_tls_address:
1264 /* Variable is at a constant offset in the thread-local
1265 storage block into the objfile for the current thread and
1266 the dynamic linker module containing this expression. Here
1267 we return returns the offset from that base. The top of the
1268 stack has the offset from the beginning of the thread
1269 control block at which the variable is located. Nothing
1270 should follow this operator, so the top of stack would be
1271 returned. */
1272 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1273 dwarf_expr_pop (ctx);
1274 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1275 result_val = value_from_ulongest (address_type, result);
1276 break;
1277
1278 case DW_OP_skip:
1279 offset = extract_signed_integer (op_ptr, 2, byte_order);
1280 op_ptr += 2;
1281 op_ptr += offset;
1282 goto no_push;
1283
1284 case DW_OP_bra:
1285 {
1286 struct value *val;
1287
1288 offset = extract_signed_integer (op_ptr, 2, byte_order);
1289 op_ptr += 2;
1290 val = dwarf_expr_fetch (ctx, 0);
1291 dwarf_require_integral (value_type (val));
1292 if (value_as_long (val) != 0)
1293 op_ptr += offset;
1294 dwarf_expr_pop (ctx);
1295 }
1296 goto no_push;
1297
1298 case DW_OP_nop:
1299 goto no_push;
1300
1301 case DW_OP_piece:
1302 {
1303 ULONGEST size;
1304
1305 /* Record the piece. */
1306 op_ptr = read_uleb128 (op_ptr, op_end, &size);
1307 add_piece (ctx, 8 * size, 0);
1308
1309 /* Pop off the address/regnum, and reset the location
1310 type. */
1311 if (ctx->location != DWARF_VALUE_LITERAL
1312 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1313 dwarf_expr_pop (ctx);
1314 ctx->location = DWARF_VALUE_MEMORY;
1315 }
1316 goto no_push;
1317
1318 case DW_OP_bit_piece:
1319 {
1320 ULONGEST size, offset;
1321
1322 /* Record the piece. */
1323 op_ptr = read_uleb128 (op_ptr, op_end, &size);
1324 op_ptr = read_uleb128 (op_ptr, op_end, &offset);
1325 add_piece (ctx, size, offset);
1326
1327 /* Pop off the address/regnum, and reset the location
1328 type. */
1329 if (ctx->location != DWARF_VALUE_LITERAL
1330 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1331 dwarf_expr_pop (ctx);
1332 ctx->location = DWARF_VALUE_MEMORY;
1333 }
1334 goto no_push;
1335
1336 case DW_OP_GNU_uninit:
1337 if (op_ptr != op_end)
1338 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1339 "be the very last op."));
1340
1341 ctx->initialized = 0;
1342 goto no_push;
1343
1344 case DW_OP_call2:
1345 {
1346 cu_offset offset;
1347
1348 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1349 op_ptr += 2;
1350 ctx->funcs->dwarf_call (ctx, offset);
1351 }
1352 goto no_push;
1353
1354 case DW_OP_call4:
1355 {
1356 cu_offset offset;
1357
1358 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1359 op_ptr += 4;
1360 ctx->funcs->dwarf_call (ctx, offset);
1361 }
1362 goto no_push;
1363
1364 case DW_OP_GNU_entry_value:
1365 {
1366 ULONGEST len;
1367 int dwarf_reg;
1368 CORE_ADDR deref_size;
1369
1370 op_ptr = read_uleb128 (op_ptr, op_end, &len);
1371 if (op_ptr + len > op_end)
1372 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1373
1374 dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1375 if (dwarf_reg != -1)
1376 {
1377 op_ptr += len;
1378 ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
1379 0 /* unused */,
1380 -1 /* deref_size */);
1381 goto no_push;
1382 }
1383
1384 dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr, op_ptr + len,
1385 &deref_size);
1386 if (dwarf_reg != -1)
1387 {
1388 if (deref_size == -1)
1389 deref_size = ctx->addr_size;
1390 op_ptr += len;
1391 ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
1392 0 /* unused */,
1393 deref_size);
1394 goto no_push;
1395 }
1396
1397 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1398 "supported only for single DW_OP_reg* "
1399 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1400 }
1401
1402 case DW_OP_GNU_const_type:
1403 {
1404 cu_offset type_die;
1405 int n;
1406 const gdb_byte *data;
1407 struct type *type;
1408
1409 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
1410 type_die.cu_off = uoffset;
1411 n = *op_ptr++;
1412 data = op_ptr;
1413 op_ptr += n;
1414
1415 type = dwarf_get_base_type (ctx, type_die, n);
1416 result_val = value_from_contents (type, data);
1417 }
1418 break;
1419
1420 case DW_OP_GNU_regval_type:
1421 {
1422 cu_offset type_die;
1423 struct type *type;
1424
1425 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
1426 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
1427 type_die.cu_off = uoffset;
1428
1429 type = dwarf_get_base_type (ctx, type_die, 0);
1430 result = (ctx->funcs->read_reg) (ctx->baton, reg);
1431 result_val = value_from_ulongest (address_type, result);
1432 result_val = value_from_contents (type,
1433 value_contents_all (result_val));
1434 }
1435 break;
1436
1437 case DW_OP_GNU_convert:
1438 case DW_OP_GNU_reinterpret:
1439 {
1440 cu_offset type_die;
1441 struct type *type;
1442
1443 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
1444 type_die.cu_off = uoffset;
1445
1446 if (type_die.cu_off == 0)
1447 type = address_type;
1448 else
1449 type = dwarf_get_base_type (ctx, type_die, 0);
1450
1451 result_val = dwarf_expr_fetch (ctx, 0);
1452 dwarf_expr_pop (ctx);
1453
1454 if (op == DW_OP_GNU_convert)
1455 result_val = value_cast (type, result_val);
1456 else if (type == value_type (result_val))
1457 {
1458 /* Nothing. */
1459 }
1460 else if (TYPE_LENGTH (type)
1461 != TYPE_LENGTH (value_type (result_val)))
1462 error (_("DW_OP_GNU_reinterpret has wrong size"));
1463 else
1464 result_val
1465 = value_from_contents (type,
1466 value_contents_all (result_val));
1467 }
1468 break;
1469
1470 default:
1471 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1472 }
1473
1474 /* Most things push a result value. */
1475 gdb_assert (result_val != NULL);
1476 dwarf_expr_push (ctx, result_val, in_stack_memory);
1477 no_push:
1478 ;
1479 }
1480
1481 /* To simplify our main caller, if the result is an implicit
1482 pointer, then make a pieced value. This is ok because we can't
1483 have implicit pointers in contexts where pieces are invalid. */
1484 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1485 add_piece (ctx, 8 * ctx->addr_size, 0);
1486
1487 abort_expression:
1488 ctx->recursion_depth--;
1489 gdb_assert (ctx->recursion_depth >= 0);
1490 }
1491
1492 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1493
1494 void
1495 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
1496 {
1497 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1498 }
1499
1500 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1501
1502 CORE_ADDR
1503 ctx_no_get_frame_cfa (void *baton)
1504 {
1505 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1506 }
1507
1508 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1509
1510 CORE_ADDR
1511 ctx_no_get_frame_pc (void *baton)
1512 {
1513 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1514 }
1515
1516 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1517
1518 CORE_ADDR
1519 ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
1520 {
1521 error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
1522 }
1523
1524 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1525
1526 void
1527 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
1528 {
1529 error (_("%s is invalid in this context"), "DW_OP_call*");
1530 }
1531
1532 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */
1533
1534 struct type *
1535 ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
1536 {
1537 error (_("Support for typed DWARF is not supported in this context"));
1538 }
1539
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  Reaching this is an internal error: a context that
   does not support DW_OP_GNU_entry_value should have rejected the
   opcode before this callback could be invoked.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   int dwarf_reg, CORE_ADDR fb_offset,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1551
1552 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */
1553
1554 CORE_ADDR
1555 ctx_no_get_addr_index (void *baton, unsigned int index)
1556 {
1557 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
1558 }
1559
1560 /* Provide a prototype to silence -Wmissing-prototypes. */
1561 extern initialize_file_ftype _initialize_dwarf2expr;
1562
1563 void
1564 _initialize_dwarf2expr (void)
1565 {
1566 dwarf_arch_cookie
1567 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1568 }
This page took 0.062941 seconds and 5 git commands to generate.