* linux-m68k-low.c: Include <asm/ptrace.h>
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (struct gdbarch *, int);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44
45 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
46 retval->stack_len = 0;
47 retval->stack_allocated = 10;
48 retval->stack = xmalloc (retval->stack_allocated
49 * sizeof (struct dwarf_stack_value));
50 retval->num_pieces = 0;
51 retval->pieces = 0;
52 retval->max_recursion_depth = 0x100;
53 return retval;
54 }
55
56 /* Release the memory allocated to CTX. */
57
58 void
59 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
60 {
61 xfree (ctx->stack);
62 xfree (ctx->pieces);
63 xfree (ctx);
64 }
65
/* Cleanup adaptor: forward ARG (really a struct dwarf_expr_context *)
   to free_dwarf_expr_context.  Used by
   make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
73
74 /* Return a cleanup that calls free_dwarf_expr_context. */
75
76 struct cleanup *
77 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
78 {
79 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
80 }
81
82 /* Expand the memory allocated to CTX's stack to contain at least
83 NEED more elements than are currently used. */
84
85 static void
86 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
87 {
88 if (ctx->stack_len + need > ctx->stack_allocated)
89 {
90 size_t newlen = ctx->stack_len + need + 10;
91
92 ctx->stack = xrealloc (ctx->stack,
93 newlen * sizeof (struct dwarf_stack_value));
94 ctx->stack_allocated = newlen;
95 }
96 }
97
98 /* Push VALUE onto CTX's stack. */
99
100 void
101 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value,
102 int in_stack_memory)
103 {
104 struct dwarf_stack_value *v;
105
106 dwarf_expr_grow_stack (ctx, 1);
107 v = &ctx->stack[ctx->stack_len++];
108 v->value = value;
109 v->in_stack_memory = in_stack_memory;
110 }
111
112 /* Pop the top item off of CTX's stack. */
113
114 void
115 dwarf_expr_pop (struct dwarf_expr_context *ctx)
116 {
117 if (ctx->stack_len <= 0)
118 error (_("dwarf expression stack underflow"));
119 ctx->stack_len--;
120 }
121
122 /* Retrieve the N'th item on CTX's stack. */
123
124 CORE_ADDR
125 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
126 {
127 if (ctx->stack_len <= n)
128 error (_("Asked for position %d of stack, stack only has %d elements on it."),
129 n, ctx->stack_len);
130 return ctx->stack[ctx->stack_len - (1 + n)].value;
131
132 }
133
134 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
135
136 int
137 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
138 {
139 if (ctx->stack_len <= n)
140 error (_("Asked for position %d of stack, stack only has %d elements on it."),
141 n, ctx->stack_len);
142 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
143
144 }
145
146 /* Add a new piece to CTX's piece list. */
147 static void
148 add_piece (struct dwarf_expr_context *ctx, ULONGEST size)
149 {
150 struct dwarf_expr_piece *p;
151
152 ctx->num_pieces++;
153
154 if (ctx->pieces)
155 ctx->pieces = xrealloc (ctx->pieces,
156 (ctx->num_pieces
157 * sizeof (struct dwarf_expr_piece)));
158 else
159 ctx->pieces = xmalloc (ctx->num_pieces
160 * sizeof (struct dwarf_expr_piece));
161
162 p = &ctx->pieces[ctx->num_pieces - 1];
163 p->location = ctx->location;
164 p->size = size;
165 if (p->location == DWARF_VALUE_LITERAL)
166 {
167 p->v.literal.data = ctx->data;
168 p->v.literal.length = ctx->len;
169 }
170 else
171 {
172 p->v.expr.value = dwarf_expr_fetch (ctx, 0);
173 p->v.expr.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
174 }
175 }
176
177 /* Evaluate the expression at ADDR (LEN bytes long) using the context
178 CTX. */
179
180 void
181 dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
182 {
183 int old_recursion_depth = ctx->recursion_depth;
184
185 execute_stack_op (ctx, addr, addr + len);
186
187 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
188
189 gdb_assert (ctx->recursion_depth == old_recursion_depth);
190 }
191
192 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
193 by R, and return the new value of BUF. Verify that it doesn't extend
194 past BUF_END. */
195
196 gdb_byte *
197 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
198 {
199 unsigned shift = 0;
200 ULONGEST result = 0;
201 gdb_byte byte;
202
203 while (1)
204 {
205 if (buf >= buf_end)
206 error (_("read_uleb128: Corrupted DWARF expression."));
207
208 byte = *buf++;
209 result |= (byte & 0x7f) << shift;
210 if ((byte & 0x80) == 0)
211 break;
212 shift += 7;
213 }
214 *r = result;
215 return buf;
216 }
217
218 /* Decode the signed LEB128 constant at BUF into the variable pointed to
219 by R, and return the new value of BUF. Verify that it doesn't extend
220 past BUF_END. */
221
222 gdb_byte *
223 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
224 {
225 unsigned shift = 0;
226 LONGEST result = 0;
227 gdb_byte byte;
228
229 while (1)
230 {
231 if (buf >= buf_end)
232 error (_("read_sleb128: Corrupted DWARF expression."));
233
234 byte = *buf++;
235 result |= (byte & 0x7f) << shift;
236 shift += 7;
237 if ((byte & 0x80) == 0)
238 break;
239 }
240 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
241 result |= -(1 << shift);
242
243 *r = result;
244 return buf;
245 }
246
247 /* Read an address of size ADDR_SIZE from BUF, and verify that it
248 doesn't extend past BUF_END. */
249
250 CORE_ADDR
251 dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
252 gdb_byte *buf_end, int addr_size)
253 {
254 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
255
256 if (buf_end - buf < addr_size)
257 error (_("dwarf2_read_address: Corrupted DWARF expression."));
258
259 /* For most architectures, calling extract_unsigned_integer() alone
260 is sufficient for extracting an address. However, some
261 architectures (e.g. MIPS) use signed addresses and using
262 extract_unsigned_integer() will not produce a correct
263 result. Make sure we invoke gdbarch_integer_to_address()
264 for those architectures which require it.
265
266 The use of `unsigned_address_type' in the code below refers to
267 the type of buf and has no bearing on the signedness of the
268 address being returned. */
269
270 if (gdbarch_integer_to_address_p (gdbarch))
271 return gdbarch_integer_to_address
272 (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);
273
274 return extract_unsigned_integer (buf, addr_size, byte_order);
275 }
276
277 /* Return the type of an address of size ADDR_SIZE,
278 for unsigned arithmetic. */
279
280 static struct type *
281 unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
282 {
283 switch (addr_size)
284 {
285 case 2:
286 return builtin_type (gdbarch)->builtin_uint16;
287 case 4:
288 return builtin_type (gdbarch)->builtin_uint32;
289 case 8:
290 return builtin_type (gdbarch)->builtin_uint64;
291 default:
292 internal_error (__FILE__, __LINE__,
293 _("Unsupported address size.\n"));
294 }
295 }
296
297 /* Return the type of an address of size ADDR_SIZE,
298 for signed arithmetic. */
299
300 static struct type *
301 signed_address_type (struct gdbarch *gdbarch, int addr_size)
302 {
303 switch (addr_size)
304 {
305 case 2:
306 return builtin_type (gdbarch)->builtin_int16;
307 case 4:
308 return builtin_type (gdbarch)->builtin_int32;
309 case 8:
310 return builtin_type (gdbarch)->builtin_int64;
311 default:
312 internal_error (__FILE__, __LINE__,
313 _("Unsupported address size.\n"));
314 }
315 }
316 \f
317
318 /* Check that the current operator is either at the end of an
319 expression, or that it is followed by a composition operator. */
320
321 static void
322 require_composition (gdb_byte *op_ptr, gdb_byte *op_end, const char *op_name)
323 {
324 /* It seems like DW_OP_GNU_uninit should be handled here. However,
325 it doesn't seem to make sense for DW_OP_*_value, and it was not
326 checked at the other place that this function is called. */
327 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
328 error (_("DWARF-2 expression error: `%s' operations must be "
329 "used either alone or in conjuction with DW_OP_piece "
330 "or DW_OP_bit_piece."),
331 op_name);
332 }
333
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  Each opcode is
   decoded in turn; most leave a result that falls through to the
   common push at the bottom of the loop, while stack-manipulation and
   composition opcodes jump to `no_push' instead.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
		  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  /* Each (possibly recursive) evaluation starts out describing a
     memory location with initialized contents; opcodes below override
     these defaults.  */
  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;		/* Default is initialized.  */

  /* Recursion happens via DW_OP_fbreg -> dwarf_expr_eval; a bounded
     depth turns a looping expression into an error.  */
  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
	   ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      /* Assume the value is not in stack memory.
	 Code that knows otherwise sets this to 1.
	 Some arithmetic on stack addresses can probably be assumed to still
	 be a stack address, but we skip this complication for now.
	 This is just an optimization, so it's always ok to punt
	 and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
	{
	/* Literal constants 0..31 encoded in the opcode itself.  */
	case DW_OP_lit0:
	case DW_OP_lit1:
	case DW_OP_lit2:
	case DW_OP_lit3:
	case DW_OP_lit4:
	case DW_OP_lit5:
	case DW_OP_lit6:
	case DW_OP_lit7:
	case DW_OP_lit8:
	case DW_OP_lit9:
	case DW_OP_lit10:
	case DW_OP_lit11:
	case DW_OP_lit12:
	case DW_OP_lit13:
	case DW_OP_lit14:
	case DW_OP_lit15:
	case DW_OP_lit16:
	case DW_OP_lit17:
	case DW_OP_lit18:
	case DW_OP_lit19:
	case DW_OP_lit20:
	case DW_OP_lit21:
	case DW_OP_lit22:
	case DW_OP_lit23:
	case DW_OP_lit24:
	case DW_OP_lit25:
	case DW_OP_lit26:
	case DW_OP_lit27:
	case DW_OP_lit28:
	case DW_OP_lit29:
	case DW_OP_lit30:
	case DW_OP_lit31:
	  result = op - DW_OP_lit0;
	  break;

	/* An inline machine address of CTX->addr_size bytes.  */
	case DW_OP_addr:
	  result = dwarf2_read_address (ctx->gdbarch,
					op_ptr, op_end, ctx->addr_size);
	  op_ptr += ctx->addr_size;
	  break;

	/* Fixed-size inline constants of 1, 2, 4 or 8 bytes, signed
	   or unsigned.  */
	case DW_OP_const1u:
	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const1s:
	  result = extract_signed_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const2u:
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const2s:
	  result = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const4u:
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const4s:
	  result = extract_signed_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const8u:
	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_const8s:
	  result = extract_signed_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	/* LEB128-encoded inline constants.  */
	case DW_OP_constu:
	  op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
	  result = uoffset;
	  break;
	case DW_OP_consts:
	  op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	  result = offset;
	  break;

	/* The DW_OP_reg operations are required to occur alone in
	   location expressions.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  if (op_ptr != op_end
	      && *op_ptr != DW_OP_piece
	      && *op_ptr != DW_OP_GNU_uninit)
	    /* NOTE(review): "conjuction" is a typo, but this is a
	       runtime string; fixing it is a behavior change.  */
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  result = op - DW_OP_reg0;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	/* Register number given as a ULEB128 operand.  */
	case DW_OP_regx:
	  op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	  require_composition (op_ptr, op_end, "DW_OP_regx");

	  result = reg;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	/* The value is the following block of bytes itself, not an
	   address; record a pointer into the expression stream.  */
	case DW_OP_implicit_value:
	  {
	    ULONGEST len;

	    op_ptr = read_uleb128 (op_ptr, op_end, &len);
	    if (op_ptr + len > op_end)
	      error (_("DW_OP_implicit_value: too few bytes available."));
	    ctx->len = len;
	    ctx->data = op_ptr;
	    ctx->location = DWARF_VALUE_LITERAL;
	    op_ptr += len;
	    require_composition (op_ptr, op_end, "DW_OP_implicit_value");
	  }
	  goto no_push;

	/* The top of stack is the object's value, not its address.  */
	case DW_OP_stack_value:
	  ctx->location = DWARF_VALUE_STACK;
	  require_composition (op_ptr, op_end, "DW_OP_stack_value");
	  goto no_push;

	/* Register contents plus a SLEB128 offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  {
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
	    result += offset;
	  }
	  break;
	/* Like DW_OP_breg*, but register number is a ULEB128 operand.  */
	case DW_OP_bregx:
	  {
	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result += offset;
	  }
	  break;
	/* Frame base (itself a DWARF expression supplied by the
	   caller's get_frame_base callback) plus a SLEB128 offset.  */
	case DW_OP_fbreg:
	  {
	    gdb_byte *datastart;
	    size_t datalen;
	    unsigned int before_stack_len;

	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    /* Rather than create a whole new context, we simply
	       record the stack length before execution, then reset it
	       afterwards, effectively erasing whatever the recursive
	       call put there.  */
	    before_stack_len = ctx->stack_len;
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
	    (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
	    dwarf_expr_eval (ctx, datastart, datalen);
	    if (ctx->location == DWARF_VALUE_LITERAL
		|| ctx->location == DWARF_VALUE_STACK)
	      error (_("Not implemented: computing frame base using explicit value operator"));
	    result = dwarf_expr_fetch (ctx, 0);
	    if (ctx->location == DWARF_VALUE_REGISTER)
	      result = (ctx->read_reg) (ctx->baton, result);
	    result = result + offset;
	    /* The frame base is an address on the stack by definition.  */
	    in_stack_memory = 1;
	    ctx->stack_len = before_stack_len;
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  break;

	/* Duplicate the top of stack.  */
	case DW_OP_dup:
	  result = dwarf_expr_fetch (ctx, 0);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
	  break;

	case DW_OP_drop:
	  dwarf_expr_pop (ctx);
	  goto no_push;

	/* Copy the stack entry at 1-byte index OFFSET to the top.  */
	case DW_OP_pick:
	  offset = *op_ptr++;
	  result = dwarf_expr_fetch (ctx, offset);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
	  break;

	/* Exchange the top two stack entries.  */
	case DW_OP_swap:
	  {
	    struct dwarf_stack_value t1, t2;

	    if (ctx->stack_len < 2)
	       error (_("Not enough elements for DW_OP_swap. Need 2, have %d."),
		      ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t1;
	    goto no_push;
	  }

	/* Duplicate the second-from-top stack entry.  */
	case DW_OP_over:
	  result = dwarf_expr_fetch (ctx, 1);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
	  break;

	/* Rotate the top three stack entries.  */
	case DW_OP_rot:
	  {
	    struct dwarf_stack_value t1, t2, t3;

	    if (ctx->stack_len < 3)
	       error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
		      ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    t3 = ctx->stack[ctx->stack_len - 3];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t3;
	    ctx->stack[ctx->stack_len - 3] = t1;
	    goto no_push;
	  }

	case DW_OP_deref:
	case DW_OP_deref_size:
	case DW_OP_abs:
	case DW_OP_neg:
	case DW_OP_not:
	case DW_OP_plus_uconst:
	  /* Unary operations.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);

	  switch (op)
	    {
	    case DW_OP_deref:
	      {
		gdb_byte *buf = alloca (ctx->addr_size);

		/* Read an address-sized value from target memory at
		   the popped address via the caller's callback.  */
		(ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + ctx->addr_size,
					      ctx->addr_size);
	      }
	      break;

	    case DW_OP_deref_size:
	      {
		/* Like DW_OP_deref but with an explicit 1-byte size
		   operand.  */
		int addr_size = *op_ptr++;
		gdb_byte *buf = alloca (addr_size);

		(ctx->read_mem) (ctx->baton, buf, result, addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + addr_size,
					      addr_size);
	      }
	      break;

	    case DW_OP_abs:
	      /* NOTE(review): the cast inspects only the low 32 bits
		 of RESULT; on a 64-bit CORE_ADDR a large negative
		 value may be missed — confirm against addr_size.  */
	      if ((signed int) result < 0)
		result = -result;
	      break;
	    case DW_OP_neg:
	      result = -result;
	      break;
	    case DW_OP_not:
	      result = ~result;
	      break;
	    case DW_OP_plus_uconst:
	      op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	      result += reg;
	      break;
	    }
	  break;

	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  Use the value engine to do computations in
	       the right width.  */
	    CORE_ADDR first, second;
	    enum exp_opcode binop;
	    struct value *val1 = NULL, *val2 = NULL;
	    struct type *stype, *utype;

	    second = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    first = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
	    stype = signed_address_type (ctx->gdbarch, ctx->addr_size);

	    /* Operands default to unsigned (UTYPE); cases that need
	       signed semantics wrap them in STYPE here.  */
	    switch (op)
	      {
	      case DW_OP_and:
		binop = BINOP_BITWISE_AND;
		break;
	      case DW_OP_div:
		binop = BINOP_DIV;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_minus:
		binop = BINOP_SUB;
		break;
	      case DW_OP_mod:
		binop = BINOP_MOD;
		break;
	      case DW_OP_mul:
		binop = BINOP_MUL;
		break;
	      case DW_OP_or:
		binop = BINOP_BITWISE_IOR;
		break;
	      case DW_OP_plus:
		binop = BINOP_ADD;
		break;
	      case DW_OP_shl:
		binop = BINOP_LSH;
		break;
	      case DW_OP_shr:
		binop = BINOP_RSH;
		break;
	      case DW_OP_shra:
		/* Arithmetic shift: same opcode as DW_OP_shr but with
		   a signed left operand.  */
		binop = BINOP_RSH;
		val1 = value_from_longest (stype, first);
		break;
	      case DW_OP_xor:
		binop = BINOP_BITWISE_XOR;
		break;
	      case DW_OP_le:
		binop = BINOP_LEQ;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_ge:
		binop = BINOP_GEQ;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_eq:
		binop = BINOP_EQUAL;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_lt:
		binop = BINOP_LESS;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_gt:
		binop = BINOP_GTR;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_ne:
		binop = BINOP_NOTEQUAL;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      default:
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }

	    /* We use unsigned operands by default.  */
	    if (val1 == NULL)
	      val1 = value_from_longest (utype, first);
	    if (val2 == NULL)
	      val2 = value_from_longest (utype, second);

	    result = value_as_long (value_binop (val1, val2, binop));
	  }
	  break;

	/* The canonical frame address, supplied by the caller.  */
	case DW_OP_call_frame_cfa:
	  result = (ctx->get_frame_cfa) (ctx->baton);
	  in_stack_memory = 1;
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return returns the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  break;

	/* Unconditional branch: signed 2-byte offset relative to the
	   next operation.  */
	case DW_OP_skip:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	/* Conditional branch: taken when the popped top of stack is
	   nonzero.  */
	case DW_OP_bra:
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  if (dwarf_expr_fetch (ctx, 0) != 0)
	    op_ptr += offset;
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;

	    /* Record the piece.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    add_piece (ctx, size);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		   "be the very last op."));

	  /* Mark the described object as uninitialized for consumers
	     of CTX.  */
	  ctx->initialized = 0;
	  goto no_push;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result, in_stack_memory);
    no_push:;
    }

  /* Balance the increment at function entry; dwarf_expr_eval asserts
     this invariant from the outside as well.  */
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
This page took 0.06821 seconds and 4 git commands to generate.