doc/ChangeLog:
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24 #include "defs.h"
25 #include "symtab.h"
26 #include "gdbtypes.h"
27 #include "value.h"
28 #include "gdbcore.h"
29 #include "elf/dwarf2.h"
30 #include "dwarf2expr.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (void);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
45 retval->stack_len = 0;
46 retval->stack_allocated = 10;
47 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
48 retval->num_pieces = 0;
49 retval->pieces = 0;
50 return retval;
51 }
52
53 /* Release the memory allocated to CTX. */
54
55 void
56 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
57 {
58 xfree (ctx->stack);
59 xfree (ctx->pieces);
60 xfree (ctx);
61 }
62
63 /* Expand the memory allocated to CTX's stack to contain at least
64 NEED more elements than are currently used. */
65
66 static void
67 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
68 {
69 if (ctx->stack_len + need > ctx->stack_allocated)
70 {
71 size_t newlen = ctx->stack_len + need + 10;
72 ctx->stack = xrealloc (ctx->stack,
73 newlen * sizeof (CORE_ADDR));
74 ctx->stack_allocated = newlen;
75 }
76 }
77
78 /* Push VALUE onto CTX's stack. */
79
80 void
81 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
82 {
83 dwarf_expr_grow_stack (ctx, 1);
84 ctx->stack[ctx->stack_len++] = value;
85 }
86
87 /* Pop the top item off of CTX's stack. */
88
89 void
90 dwarf_expr_pop (struct dwarf_expr_context *ctx)
91 {
92 if (ctx->stack_len <= 0)
93 error (_("dwarf expression stack underflow"));
94 ctx->stack_len--;
95 }
96
97 /* Retrieve the N'th item on CTX's stack. */
98
99 CORE_ADDR
100 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
101 {
102 if (ctx->stack_len <= n)
103 error (_("Asked for position %d of stack, stack only has %d elements on it."),
104 n, ctx->stack_len);
105 return ctx->stack[ctx->stack_len - (1 + n)];
106
107 }
108
109 /* Add a new piece to CTX's piece list. */
110 static void
111 add_piece (struct dwarf_expr_context *ctx,
112 int in_reg, CORE_ADDR value, ULONGEST size)
113 {
114 struct dwarf_expr_piece *p;
115
116 ctx->num_pieces++;
117
118 if (ctx->pieces)
119 ctx->pieces = xrealloc (ctx->pieces,
120 (ctx->num_pieces
121 * sizeof (struct dwarf_expr_piece)));
122 else
123 ctx->pieces = xmalloc (ctx->num_pieces
124 * sizeof (struct dwarf_expr_piece));
125
126 p = &ctx->pieces[ctx->num_pieces - 1];
127 p->in_reg = in_reg;
128 p->value = value;
129 p->size = size;
130 }
131
/* Evaluate the DWARF expression at ADDR (LEN bytes long) using the
   context CTX.  On return the result is available via CTX's stack
   (see dwarf_expr_fetch) and/or CTX's piece list.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
{
  execute_stack_op (ctx, addr, addr + len);
}
140
141 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
142 by R, and return the new value of BUF. Verify that it doesn't extend
143 past BUF_END. */
144
145 gdb_byte *
146 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
147 {
148 unsigned shift = 0;
149 ULONGEST result = 0;
150 gdb_byte byte;
151
152 while (1)
153 {
154 if (buf >= buf_end)
155 error (_("read_uleb128: Corrupted DWARF expression."));
156
157 byte = *buf++;
158 result |= (byte & 0x7f) << shift;
159 if ((byte & 0x80) == 0)
160 break;
161 shift += 7;
162 }
163 *r = result;
164 return buf;
165 }
166
167 /* Decode the signed LEB128 constant at BUF into the variable pointed to
168 by R, and return the new value of BUF. Verify that it doesn't extend
169 past BUF_END. */
170
171 gdb_byte *
172 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
173 {
174 unsigned shift = 0;
175 LONGEST result = 0;
176 gdb_byte byte;
177
178 while (1)
179 {
180 if (buf >= buf_end)
181 error (_("read_sleb128: Corrupted DWARF expression."));
182
183 byte = *buf++;
184 result |= (byte & 0x7f) << shift;
185 shift += 7;
186 if ((byte & 0x80) == 0)
187 break;
188 }
189 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
190 result |= -(1 << shift);
191
192 *r = result;
193 return buf;
194 }
195
196 /* Read an address from BUF, and verify that it doesn't extend past
197 BUF_END. The address is returned, and *BYTES_READ is set to the
198 number of bytes read from BUF. */
199
200 CORE_ADDR
201 dwarf2_read_address (gdb_byte *buf, gdb_byte *buf_end, int *bytes_read)
202 {
203 CORE_ADDR result;
204
205 if (buf_end - buf < TARGET_ADDR_BIT / TARGET_CHAR_BIT)
206 error (_("dwarf2_read_address: Corrupted DWARF expression."));
207
208 *bytes_read = TARGET_ADDR_BIT / TARGET_CHAR_BIT;
209
210 /* For most architectures, calling extract_unsigned_integer() alone
211 is sufficient for extracting an address. However, some
212 architectures (e.g. MIPS) use signed addresses and using
213 extract_unsigned_integer() will not produce a correct
214 result. Turning the unsigned integer into a value and then
215 decomposing that value as an address will cause
216 gdbarch_integer_to_address() to be invoked for those
217 architectures which require it. Thus, using value_as_address()
218 will produce the correct result for both types of architectures.
219
220 One concern regarding the use of values for this purpose is
221 efficiency. Obviously, these extra calls will take more time to
222 execute and creating a value takes more space, space which will
223 have to be garbage collected at a later time. If constructing
224 and then decomposing a value for this purpose proves to be too
225 inefficient, then gdbarch_integer_to_address() can be called
226 directly.
227
228 The use of `unsigned_address_type' in the code below refers to
229 the type of buf and has no bearing on the signedness of the
230 address being returned. */
231
232 result = value_as_address (value_from_longest
233 (unsigned_address_type (),
234 extract_unsigned_integer
235 (buf,
236 TARGET_ADDR_BIT / TARGET_CHAR_BIT)));
237
238 return result;
239 }
240
241 /* Return the type of an address, for unsigned arithmetic. */
242
243 static struct type *
244 unsigned_address_type (void)
245 {
246 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
247 {
248 case 2:
249 return builtin_type_uint16;
250 case 4:
251 return builtin_type_uint32;
252 case 8:
253 return builtin_type_uint64;
254 default:
255 internal_error (__FILE__, __LINE__,
256 _("Unsupported address size.\n"));
257 }
258 }
259
260 /* Return the type of an address, for signed arithmetic. */
261
262 static struct type *
263 signed_address_type (void)
264 {
265 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
266 {
267 case 2:
268 return builtin_type_int16;
269 case 4:
270 return builtin_type_int32;
271 case 8:
272 return builtin_type_int64;
273 default:
274 internal_error (__FILE__, __LINE__,
275 _("Unsupported address size.\n"));
276 }
277 }
278 \f
279 /* The engine for the expression evaluator. Using the context in CTX,
280 evaluate the expression between OP_PTR and OP_END. */
281
282 static void
283 execute_stack_op (struct dwarf_expr_context *ctx,
284 gdb_byte *op_ptr, gdb_byte *op_end)
285 {
286 ctx->in_reg = 0;
287
288 while (op_ptr < op_end)
289 {
290 enum dwarf_location_atom op = *op_ptr++;
291 CORE_ADDR result;
292 ULONGEST uoffset, reg;
293 LONGEST offset;
294 int bytes_read;
295
296 switch (op)
297 {
298 case DW_OP_lit0:
299 case DW_OP_lit1:
300 case DW_OP_lit2:
301 case DW_OP_lit3:
302 case DW_OP_lit4:
303 case DW_OP_lit5:
304 case DW_OP_lit6:
305 case DW_OP_lit7:
306 case DW_OP_lit8:
307 case DW_OP_lit9:
308 case DW_OP_lit10:
309 case DW_OP_lit11:
310 case DW_OP_lit12:
311 case DW_OP_lit13:
312 case DW_OP_lit14:
313 case DW_OP_lit15:
314 case DW_OP_lit16:
315 case DW_OP_lit17:
316 case DW_OP_lit18:
317 case DW_OP_lit19:
318 case DW_OP_lit20:
319 case DW_OP_lit21:
320 case DW_OP_lit22:
321 case DW_OP_lit23:
322 case DW_OP_lit24:
323 case DW_OP_lit25:
324 case DW_OP_lit26:
325 case DW_OP_lit27:
326 case DW_OP_lit28:
327 case DW_OP_lit29:
328 case DW_OP_lit30:
329 case DW_OP_lit31:
330 result = op - DW_OP_lit0;
331 break;
332
333 case DW_OP_addr:
334 result = dwarf2_read_address (op_ptr, op_end, &bytes_read);
335 op_ptr += bytes_read;
336 break;
337
338 case DW_OP_const1u:
339 result = extract_unsigned_integer (op_ptr, 1);
340 op_ptr += 1;
341 break;
342 case DW_OP_const1s:
343 result = extract_signed_integer (op_ptr, 1);
344 op_ptr += 1;
345 break;
346 case DW_OP_const2u:
347 result = extract_unsigned_integer (op_ptr, 2);
348 op_ptr += 2;
349 break;
350 case DW_OP_const2s:
351 result = extract_signed_integer (op_ptr, 2);
352 op_ptr += 2;
353 break;
354 case DW_OP_const4u:
355 result = extract_unsigned_integer (op_ptr, 4);
356 op_ptr += 4;
357 break;
358 case DW_OP_const4s:
359 result = extract_signed_integer (op_ptr, 4);
360 op_ptr += 4;
361 break;
362 case DW_OP_const8u:
363 result = extract_unsigned_integer (op_ptr, 8);
364 op_ptr += 8;
365 break;
366 case DW_OP_const8s:
367 result = extract_signed_integer (op_ptr, 8);
368 op_ptr += 8;
369 break;
370 case DW_OP_constu:
371 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
372 result = uoffset;
373 break;
374 case DW_OP_consts:
375 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
376 result = offset;
377 break;
378
379 /* The DW_OP_reg operations are required to occur alone in
380 location expressions. */
381 case DW_OP_reg0:
382 case DW_OP_reg1:
383 case DW_OP_reg2:
384 case DW_OP_reg3:
385 case DW_OP_reg4:
386 case DW_OP_reg5:
387 case DW_OP_reg6:
388 case DW_OP_reg7:
389 case DW_OP_reg8:
390 case DW_OP_reg9:
391 case DW_OP_reg10:
392 case DW_OP_reg11:
393 case DW_OP_reg12:
394 case DW_OP_reg13:
395 case DW_OP_reg14:
396 case DW_OP_reg15:
397 case DW_OP_reg16:
398 case DW_OP_reg17:
399 case DW_OP_reg18:
400 case DW_OP_reg19:
401 case DW_OP_reg20:
402 case DW_OP_reg21:
403 case DW_OP_reg22:
404 case DW_OP_reg23:
405 case DW_OP_reg24:
406 case DW_OP_reg25:
407 case DW_OP_reg26:
408 case DW_OP_reg27:
409 case DW_OP_reg28:
410 case DW_OP_reg29:
411 case DW_OP_reg30:
412 case DW_OP_reg31:
413 if (op_ptr != op_end && *op_ptr != DW_OP_piece)
414 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
415 "used either alone or in conjuction with DW_OP_piece."));
416
417 result = op - DW_OP_reg0;
418 ctx->in_reg = 1;
419
420 break;
421
422 case DW_OP_regx:
423 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
424 if (op_ptr != op_end && *op_ptr != DW_OP_piece)
425 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
426 "used either alone or in conjuction with DW_OP_piece."));
427
428 result = reg;
429 ctx->in_reg = 1;
430 break;
431
432 case DW_OP_breg0:
433 case DW_OP_breg1:
434 case DW_OP_breg2:
435 case DW_OP_breg3:
436 case DW_OP_breg4:
437 case DW_OP_breg5:
438 case DW_OP_breg6:
439 case DW_OP_breg7:
440 case DW_OP_breg8:
441 case DW_OP_breg9:
442 case DW_OP_breg10:
443 case DW_OP_breg11:
444 case DW_OP_breg12:
445 case DW_OP_breg13:
446 case DW_OP_breg14:
447 case DW_OP_breg15:
448 case DW_OP_breg16:
449 case DW_OP_breg17:
450 case DW_OP_breg18:
451 case DW_OP_breg19:
452 case DW_OP_breg20:
453 case DW_OP_breg21:
454 case DW_OP_breg22:
455 case DW_OP_breg23:
456 case DW_OP_breg24:
457 case DW_OP_breg25:
458 case DW_OP_breg26:
459 case DW_OP_breg27:
460 case DW_OP_breg28:
461 case DW_OP_breg29:
462 case DW_OP_breg30:
463 case DW_OP_breg31:
464 {
465 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
466 result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
467 result += offset;
468 }
469 break;
470 case DW_OP_bregx:
471 {
472 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
473 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
474 result = (ctx->read_reg) (ctx->baton, reg);
475 result += offset;
476 }
477 break;
478 case DW_OP_fbreg:
479 {
480 gdb_byte *datastart;
481 size_t datalen;
482 unsigned int before_stack_len;
483
484 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
485 /* Rather than create a whole new context, we simply
486 record the stack length before execution, then reset it
487 afterwards, effectively erasing whatever the recursive
488 call put there. */
489 before_stack_len = ctx->stack_len;
490 /* FIXME: cagney/2003-03-26: This code should be using
491 get_frame_base_address(), and then implement a dwarf2
492 specific this_base method. */
493 (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
494 dwarf_expr_eval (ctx, datastart, datalen);
495 result = dwarf_expr_fetch (ctx, 0);
496 if (ctx->in_reg)
497 result = (ctx->read_reg) (ctx->baton, result);
498 result = result + offset;
499 ctx->stack_len = before_stack_len;
500 ctx->in_reg = 0;
501 }
502 break;
503 case DW_OP_dup:
504 result = dwarf_expr_fetch (ctx, 0);
505 break;
506
507 case DW_OP_drop:
508 dwarf_expr_pop (ctx);
509 goto no_push;
510
511 case DW_OP_pick:
512 offset = *op_ptr++;
513 result = dwarf_expr_fetch (ctx, offset);
514 break;
515
516 case DW_OP_over:
517 result = dwarf_expr_fetch (ctx, 1);
518 break;
519
520 case DW_OP_rot:
521 {
522 CORE_ADDR t1, t2, t3;
523
524 if (ctx->stack_len < 3)
525 error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
526 ctx->stack_len);
527 t1 = ctx->stack[ctx->stack_len - 1];
528 t2 = ctx->stack[ctx->stack_len - 2];
529 t3 = ctx->stack[ctx->stack_len - 3];
530 ctx->stack[ctx->stack_len - 1] = t2;
531 ctx->stack[ctx->stack_len - 2] = t3;
532 ctx->stack[ctx->stack_len - 3] = t1;
533 goto no_push;
534 }
535
536 case DW_OP_deref:
537 case DW_OP_deref_size:
538 case DW_OP_abs:
539 case DW_OP_neg:
540 case DW_OP_not:
541 case DW_OP_plus_uconst:
542 /* Unary operations. */
543 result = dwarf_expr_fetch (ctx, 0);
544 dwarf_expr_pop (ctx);
545
546 switch (op)
547 {
548 case DW_OP_deref:
549 {
550 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
551 int bytes_read;
552
553 (ctx->read_mem) (ctx->baton, buf, result,
554 TARGET_ADDR_BIT / TARGET_CHAR_BIT);
555 result = dwarf2_read_address (buf,
556 buf + (TARGET_ADDR_BIT
557 / TARGET_CHAR_BIT),
558 &bytes_read);
559 }
560 break;
561
562 case DW_OP_deref_size:
563 {
564 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
565 int bytes_read;
566
567 (ctx->read_mem) (ctx->baton, buf, result, *op_ptr++);
568 result = dwarf2_read_address (buf,
569 buf + (TARGET_ADDR_BIT
570 / TARGET_CHAR_BIT),
571 &bytes_read);
572 }
573 break;
574
575 case DW_OP_abs:
576 if ((signed int) result < 0)
577 result = -result;
578 break;
579 case DW_OP_neg:
580 result = -result;
581 break;
582 case DW_OP_not:
583 result = ~result;
584 break;
585 case DW_OP_plus_uconst:
586 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
587 result += reg;
588 break;
589 }
590 break;
591
592 case DW_OP_and:
593 case DW_OP_div:
594 case DW_OP_minus:
595 case DW_OP_mod:
596 case DW_OP_mul:
597 case DW_OP_or:
598 case DW_OP_plus:
599 case DW_OP_shl:
600 case DW_OP_shr:
601 case DW_OP_shra:
602 case DW_OP_xor:
603 case DW_OP_le:
604 case DW_OP_ge:
605 case DW_OP_eq:
606 case DW_OP_lt:
607 case DW_OP_gt:
608 case DW_OP_ne:
609 {
610 /* Binary operations. Use the value engine to do computations in
611 the right width. */
612 CORE_ADDR first, second;
613 enum exp_opcode binop;
614 struct value *val1, *val2;
615
616 second = dwarf_expr_fetch (ctx, 0);
617 dwarf_expr_pop (ctx);
618
619 first = dwarf_expr_fetch (ctx, 0);
620 dwarf_expr_pop (ctx);
621
622 val1 = value_from_longest (unsigned_address_type (), first);
623 val2 = value_from_longest (unsigned_address_type (), second);
624
625 switch (op)
626 {
627 case DW_OP_and:
628 binop = BINOP_BITWISE_AND;
629 break;
630 case DW_OP_div:
631 binop = BINOP_DIV;
632 break;
633 case DW_OP_minus:
634 binop = BINOP_SUB;
635 break;
636 case DW_OP_mod:
637 binop = BINOP_MOD;
638 break;
639 case DW_OP_mul:
640 binop = BINOP_MUL;
641 break;
642 case DW_OP_or:
643 binop = BINOP_BITWISE_IOR;
644 break;
645 case DW_OP_plus:
646 binop = BINOP_ADD;
647 break;
648 case DW_OP_shl:
649 binop = BINOP_LSH;
650 break;
651 case DW_OP_shr:
652 binop = BINOP_RSH;
653 break;
654 case DW_OP_shra:
655 binop = BINOP_RSH;
656 val1 = value_from_longest (signed_address_type (), first);
657 break;
658 case DW_OP_xor:
659 binop = BINOP_BITWISE_XOR;
660 break;
661 case DW_OP_le:
662 binop = BINOP_LEQ;
663 break;
664 case DW_OP_ge:
665 binop = BINOP_GEQ;
666 break;
667 case DW_OP_eq:
668 binop = BINOP_EQUAL;
669 break;
670 case DW_OP_lt:
671 binop = BINOP_LESS;
672 break;
673 case DW_OP_gt:
674 binop = BINOP_GTR;
675 break;
676 case DW_OP_ne:
677 binop = BINOP_NOTEQUAL;
678 break;
679 default:
680 internal_error (__FILE__, __LINE__,
681 _("Can't be reached."));
682 }
683 result = value_as_long (value_binop (val1, val2, binop));
684 }
685 break;
686
687 case DW_OP_GNU_push_tls_address:
688 /* Variable is at a constant offset in the thread-local
689 storage block into the objfile for the current thread and
690 the dynamic linker module containing this expression. Here
691 we return returns the offset from that base. The top of the
692 stack has the offset from the beginning of the thread
693 control block at which the variable is located. Nothing
694 should follow this operator, so the top of stack would be
695 returned. */
696 result = dwarf_expr_fetch (ctx, 0);
697 dwarf_expr_pop (ctx);
698 result = (ctx->get_tls_address) (ctx->baton, result);
699 break;
700
701 case DW_OP_skip:
702 offset = extract_signed_integer (op_ptr, 2);
703 op_ptr += 2;
704 op_ptr += offset;
705 goto no_push;
706
707 case DW_OP_bra:
708 offset = extract_signed_integer (op_ptr, 2);
709 op_ptr += 2;
710 if (dwarf_expr_fetch (ctx, 0) != 0)
711 op_ptr += offset;
712 dwarf_expr_pop (ctx);
713 goto no_push;
714
715 case DW_OP_nop:
716 goto no_push;
717
718 case DW_OP_piece:
719 {
720 ULONGEST size;
721 CORE_ADDR addr_or_regnum;
722
723 /* Record the piece. */
724 op_ptr = read_uleb128 (op_ptr, op_end, &size);
725 addr_or_regnum = dwarf_expr_fetch (ctx, 0);
726 add_piece (ctx, ctx->in_reg, addr_or_regnum, size);
727
728 /* Pop off the address/regnum, and clear the in_reg flag. */
729 dwarf_expr_pop (ctx);
730 ctx->in_reg = 0;
731 }
732 goto no_push;
733
734 default:
735 error (_("Unhandled dwarf expression opcode 0x%x"), op);
736 }
737
738 /* Most things push a result value. */
739 dwarf_expr_push (ctx, result);
740 no_push:;
741 }
742 }
This page took 0.085099 seconds and 4 git commands to generate.