Make dwarf_expr_piece::pieces an std::vector
[deliverable/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2017 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29 #include "dwarf2loc.h"
30 #include "common/underlying.h"
31
/* Cookie for gdbarch data.  Used by address_type to look up the
   per-architecture dwarf_gdbarch_types instance.  */

static struct gdbarch_data *dwarf_arch_cookie;
35
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Signed integer types for 2-, 4- and 8-byte address sizes, in that
     order.  Entries start out NULL and are created lazily by
     dwarf_expr_context::address_type.  */
  struct type *dw_types[3];
};
43
44 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
45
46 static void *
47 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
48 {
49 struct dwarf_gdbarch_types *types
50 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
51
52 /* The types themselves are lazily initialized. */
53
54 return types;
55 }
56
57 /* Return the type used for DWARF operations where the type is
58 unspecified in the DWARF spec. Only certain sizes are
59 supported. */
60
61 struct type *
62 dwarf_expr_context::address_type () const
63 {
64 struct dwarf_gdbarch_types *types
65 = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
66 dwarf_arch_cookie);
67 int ndx;
68
69 if (this->addr_size == 2)
70 ndx = 0;
71 else if (this->addr_size == 4)
72 ndx = 1;
73 else if (this->addr_size == 8)
74 ndx = 2;
75 else
76 error (_("Unsupported address size in DWARF expressions: %d bits"),
77 8 * this->addr_size);
78
79 if (types->dw_types[ndx] == NULL)
80 types->dw_types[ndx]
81 = arch_integer_type (this->gdbarch,
82 8 * this->addr_size,
83 0, "<signed DWARF address type>");
84
85 return types->dw_types[ndx];
86 }
87
/* Create a new context for the expression evaluator.  */

dwarf_expr_context::dwarf_expr_context ()
: stack (NULL),
  stack_len (0),
  stack_allocated (10),		/* Initial capacity; grow_stack expands it.  */
  gdbarch (NULL),
  addr_size (0),
  ref_addr_size (0),
  offset (0),
  recursion_depth (0),
  max_recursion_depth (0x100),	/* Loop-detection limit for execute_stack_op.  */
  location (DWARF_VALUE_MEMORY),
  len (0),
  data (NULL),
  initialized (0)
{
  /* Pre-allocate the evaluation stack at the initial capacity.  */
  this->stack = XNEWVEC (struct dwarf_stack_value, this->stack_allocated);
}
107
/* Clean up a dwarf_expr_context.  */

dwarf_expr_context::~dwarf_expr_context ()
{
  /* The manually-managed stack array is the only resource owned
     directly by this object.  */
  xfree (this->stack);
}
114
115 /* Expand the memory allocated stack to contain at least
116 NEED more elements than are currently used. */
117
118 void
119 dwarf_expr_context::grow_stack (size_t need)
120 {
121 if (this->stack_len + need > this->stack_allocated)
122 {
123 size_t newlen = this->stack_len + need + 10;
124
125 this->stack = XRESIZEVEC (struct dwarf_stack_value, this->stack, newlen);
126 this->stack_allocated = newlen;
127 }
128 }
129
130 /* Push VALUE onto the stack. */
131
132 void
133 dwarf_expr_context::push (struct value *value, int in_stack_memory)
134 {
135 struct dwarf_stack_value *v;
136
137 grow_stack (1);
138 v = &this->stack[this->stack_len++];
139 v->value = value;
140 v->in_stack_memory = in_stack_memory;
141 }
142
143 /* Push VALUE onto the stack. */
144
145 void
146 dwarf_expr_context::push_address (CORE_ADDR value, int in_stack_memory)
147 {
148 push (value_from_ulongest (address_type (), value), in_stack_memory);
149 }
150
151 /* Pop the top item off of the stack. */
152
153 void
154 dwarf_expr_context::pop ()
155 {
156 if (this->stack_len <= 0)
157 error (_("dwarf expression stack underflow"));
158 this->stack_len--;
159 }
160
161 /* Retrieve the N'th item on the stack. */
162
163 struct value *
164 dwarf_expr_context::fetch (int n)
165 {
166 if (this->stack_len <= n)
167 error (_("Asked for position %d of stack, "
168 "stack only has %d elements on it."),
169 n, this->stack_len);
170 return this->stack[this->stack_len - (1 + n)].value;
171 }
172
173 /* Require that TYPE be an integral type; throw an exception if not. */
174
175 static void
176 dwarf_require_integral (struct type *type)
177 {
178 if (TYPE_CODE (type) != TYPE_CODE_INT
179 && TYPE_CODE (type) != TYPE_CODE_CHAR
180 && TYPE_CODE (type) != TYPE_CODE_BOOL)
181 error (_("integral type expected in DWARF expression"));
182 }
183
184 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
185 type. */
186
187 static struct type *
188 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
189 {
190 switch (TYPE_LENGTH (type))
191 {
192 case 1:
193 return builtin_type (gdbarch)->builtin_uint8;
194 case 2:
195 return builtin_type (gdbarch)->builtin_uint16;
196 case 4:
197 return builtin_type (gdbarch)->builtin_uint32;
198 case 8:
199 return builtin_type (gdbarch)->builtin_uint64;
200 default:
201 error (_("no unsigned variant found for type, while evaluating "
202 "DWARF expression"));
203 }
204 }
205
206 /* Return the signed form of TYPE. TYPE is necessarily an integral
207 type. */
208
209 static struct type *
210 get_signed_type (struct gdbarch *gdbarch, struct type *type)
211 {
212 switch (TYPE_LENGTH (type))
213 {
214 case 1:
215 return builtin_type (gdbarch)->builtin_int8;
216 case 2:
217 return builtin_type (gdbarch)->builtin_int16;
218 case 4:
219 return builtin_type (gdbarch)->builtin_int32;
220 case 8:
221 return builtin_type (gdbarch)->builtin_int64;
222 default:
223 error (_("no signed variant found for type, while evaluating "
224 "DWARF expression"));
225 }
226 }
227
/* Retrieve the N'th item on the stack, converted to an address.  */

CORE_ADDR
dwarf_expr_context::fetch_address (int n)
{
  struct value *result_val = fetch (n);
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  ULONGEST result;

  /* Only integral values can sensibly be interpreted as addresses.  */
  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (this->gdbarch))
    {
      /* Re-serialize the value at the context's address size and let
	 the architecture hook do the conversion.  */
      gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
      struct type *int_type = get_unsigned_type (this->gdbarch,
						 value_type (result_val));

      store_unsigned_integer (buf, this->addr_size, byte_order, result);
      return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
260
261 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
262
263 int
264 dwarf_expr_context::fetch_in_stack_memory (int n)
265 {
266 if (this->stack_len <= n)
267 error (_("Asked for position %d of stack, "
268 "stack only has %d elements on it."),
269 n, this->stack_len);
270 return this->stack[this->stack_len - (1 + n)].in_stack_memory;
271 }
272
273 /* Return true if the expression stack is empty. */
274
275 int
276 dwarf_expr_context::stack_empty_p () const
277 {
278 return this->stack_len == 0;
279 }
280
/* Add a new piece to the dwarf_expr_context's piece list.  SIZE and
   OFFSET describe the piece; its location kind and payload are taken
   from the context's current evaluation state.  */
void
dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
{
  this->pieces.emplace_back ();
  dwarf_expr_piece &p = this->pieces.back ();

  p.location = this->location;
  p.size = size;
  p.offset = offset;

  if (p.location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces point into the expression bytes recorded in
	 this->data/this->len by the DW_OP_implicit_value handler.  */
      p.v.literal.data = this->data;
      p.v.literal.length = this->len;
    }
  else if (stack_empty_p ())
    {
      p.location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      this->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p.location == DWARF_VALUE_MEMORY)
    {
      /* The address (and its stack-memory flag) is on top of the
	 evaluation stack.  */
      p.v.mem.addr = fetch_address (0);
      p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
    }
  else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* The DW_OP_implicit_pointer handler stored the referred-to
	 DIE's section offset in this->len, and pushed the byte offset
	 on the stack.  */
      p.v.ptr.die_sect_off = (sect_offset) this->len;
      p.v.ptr.offset = value_as_long (fetch (0));
    }
  else if (p.location == DWARF_VALUE_REGISTER)
    p.v.regno = value_as_long (fetch (0));
  else
    {
      /* DWARF_VALUE_STACK: the piece's value is the top of stack.  */
      p.v.value = fetch (0);
    }
}
323
324 /* Evaluate the expression at ADDR (LEN bytes long). */
325
326 void
327 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
328 {
329 int old_recursion_depth = this->recursion_depth;
330
331 execute_stack_op (addr, addr + len);
332
333 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
334
335 gdb_assert (this->recursion_depth == old_recursion_depth);
336 }
337
338 /* Helper to read a uleb128 value or throw an error. */
339
340 const gdb_byte *
341 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
342 uint64_t *r)
343 {
344 buf = gdb_read_uleb128 (buf, buf_end, r);
345 if (buf == NULL)
346 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
347 return buf;
348 }
349
350 /* Helper to read a sleb128 value or throw an error. */
351
352 const gdb_byte *
353 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
354 int64_t *r)
355 {
356 buf = gdb_read_sleb128 (buf, buf_end, r);
357 if (buf == NULL)
358 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
359 return buf;
360 }
361
362 const gdb_byte *
363 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
364 {
365 buf = gdb_skip_leb128 (buf, buf_end);
366 if (buf == NULL)
367 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
368 return buf;
369 }
370 \f
371
372 /* Check that the current operator is either at the end of an
373 expression, or that it is followed by a composition operator or by
374 DW_OP_GNU_uninit (which should terminate the expression). */
375
376 void
377 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
378 const char *op_name)
379 {
380 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
381 && *op_ptr != DW_OP_GNU_uninit)
382 error (_("DWARF-2 expression error: `%s' operations must be "
383 "used either alone or in conjunction with DW_OP_piece "
384 "or DW_OP_bit_piece."),
385 op_name);
386 }
387
388 /* Return true iff the types T1 and T2 are "the same". This only does
389 checks that might reasonably be needed to compare DWARF base
390 types. */
391
392 static int
393 base_types_equal_p (struct type *t1, struct type *t2)
394 {
395 if (TYPE_CODE (t1) != TYPE_CODE (t2))
396 return 0;
397 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
398 return 0;
399 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
400 }
401
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;

  /* The single-byte DW_OP_reg0..DW_OP_reg31 form: the opcode must be
     the whole block.  */
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
    {
      /* Opcode, ULEB128 register number, then a LEB128 type offset we
	 do not need the value of.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Opcode followed by a ULEB128 register number.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;

  /* The opcode must be the entire block, and the register number must
     fit in the int return value without truncation.  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
442
443 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
444 DW_OP_deref* return the DWARF register number. Otherwise return -1.
445 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
446 size from DW_OP_deref_size. */
447
448 int
449 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
450 CORE_ADDR *deref_size_return)
451 {
452 uint64_t dwarf_reg;
453 int64_t offset;
454
455 if (buf_end <= buf)
456 return -1;
457
458 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
459 {
460 dwarf_reg = *buf - DW_OP_breg0;
461 buf++;
462 if (buf >= buf_end)
463 return -1;
464 }
465 else if (*buf == DW_OP_bregx)
466 {
467 buf++;
468 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
469 if (buf == NULL)
470 return -1;
471 if ((int) dwarf_reg != dwarf_reg)
472 return -1;
473 }
474 else
475 return -1;
476
477 buf = gdb_read_sleb128 (buf, buf_end, &offset);
478 if (buf == NULL)
479 return -1;
480 if (offset != 0)
481 return -1;
482
483 if (*buf == DW_OP_deref)
484 {
485 buf++;
486 *deref_size_return = -1;
487 }
488 else if (*buf == DW_OP_deref_size)
489 {
490 buf++;
491 if (buf >= buf_end)
492 return -1;
493 *deref_size_return = *buf++;
494 }
495 else
496 return -1;
497
498 if (buf != buf_end)
499 return -1;
500
501 return dwarf_reg;
502 }
503
504 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
505 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
506
507 int
508 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
509 CORE_ADDR *fb_offset_return)
510 {
511 int64_t fb_offset;
512
513 if (buf_end <= buf)
514 return 0;
515
516 if (*buf != DW_OP_fbreg)
517 return 0;
518 buf++;
519
520 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
521 if (buf == NULL)
522 return 0;
523 *fb_offset_return = fb_offset;
524 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
525 return 0;
526
527 return 1;
528 }
529
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;

  /* Accept either the compact DW_OP_breg0..31 form or DW_OP_bregx with
     a ULEB128 register number.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* The register must map to this architecture's stack pointer.  */
  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;

  /* The operand must end the block, and the offset must survive the
     round-trip through CORE_ADDR without losing information.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
571
572 /* The engine for the expression evaluator. Using the context in this
573 object, evaluate the expression between OP_PTR and OP_END. */
574
575 void
576 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
577 const gdb_byte *op_end)
578 {
579 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
580 /* Old-style "untyped" DWARF values need special treatment in a
581 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
582 a special type for these values so we can distinguish them from
583 values that have an explicit type, because explicitly-typed
584 values do not need special treatment. This special type must be
585 different (in the `==' sense) from any base type coming from the
586 CU. */
587 struct type *address_type = this->address_type ();
588
589 this->location = DWARF_VALUE_MEMORY;
590 this->initialized = 1; /* Default is initialized. */
591
592 if (this->recursion_depth > this->max_recursion_depth)
593 error (_("DWARF-2 expression error: Loop detected (%d)."),
594 this->recursion_depth);
595 this->recursion_depth++;
596
597 while (op_ptr < op_end)
598 {
599 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
600 ULONGEST result;
601 /* Assume the value is not in stack memory.
602 Code that knows otherwise sets this to 1.
603 Some arithmetic on stack addresses can probably be assumed to still
604 be a stack address, but we skip this complication for now.
605 This is just an optimization, so it's always ok to punt
606 and leave this as 0. */
607 int in_stack_memory = 0;
608 uint64_t uoffset, reg;
609 int64_t offset;
610 struct value *result_val = NULL;
611
612 /* The DWARF expression might have a bug causing an infinite
613 loop. In that case, quitting is the only way out. */
614 QUIT;
615
616 switch (op)
617 {
618 case DW_OP_lit0:
619 case DW_OP_lit1:
620 case DW_OP_lit2:
621 case DW_OP_lit3:
622 case DW_OP_lit4:
623 case DW_OP_lit5:
624 case DW_OP_lit6:
625 case DW_OP_lit7:
626 case DW_OP_lit8:
627 case DW_OP_lit9:
628 case DW_OP_lit10:
629 case DW_OP_lit11:
630 case DW_OP_lit12:
631 case DW_OP_lit13:
632 case DW_OP_lit14:
633 case DW_OP_lit15:
634 case DW_OP_lit16:
635 case DW_OP_lit17:
636 case DW_OP_lit18:
637 case DW_OP_lit19:
638 case DW_OP_lit20:
639 case DW_OP_lit21:
640 case DW_OP_lit22:
641 case DW_OP_lit23:
642 case DW_OP_lit24:
643 case DW_OP_lit25:
644 case DW_OP_lit26:
645 case DW_OP_lit27:
646 case DW_OP_lit28:
647 case DW_OP_lit29:
648 case DW_OP_lit30:
649 case DW_OP_lit31:
650 result = op - DW_OP_lit0;
651 result_val = value_from_ulongest (address_type, result);
652 break;
653
654 case DW_OP_addr:
655 result = extract_unsigned_integer (op_ptr,
656 this->addr_size, byte_order);
657 op_ptr += this->addr_size;
658 /* Some versions of GCC emit DW_OP_addr before
659 DW_OP_GNU_push_tls_address. In this case the value is an
660 index, not an address. We don't support things like
661 branching between the address and the TLS op. */
662 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
663 result += this->offset;
664 result_val = value_from_ulongest (address_type, result);
665 break;
666
667 case DW_OP_GNU_addr_index:
668 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
669 result = this->get_addr_index (uoffset);
670 result += this->offset;
671 result_val = value_from_ulongest (address_type, result);
672 break;
673 case DW_OP_GNU_const_index:
674 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
675 result = this->get_addr_index (uoffset);
676 result_val = value_from_ulongest (address_type, result);
677 break;
678
679 case DW_OP_const1u:
680 result = extract_unsigned_integer (op_ptr, 1, byte_order);
681 result_val = value_from_ulongest (address_type, result);
682 op_ptr += 1;
683 break;
684 case DW_OP_const1s:
685 result = extract_signed_integer (op_ptr, 1, byte_order);
686 result_val = value_from_ulongest (address_type, result);
687 op_ptr += 1;
688 break;
689 case DW_OP_const2u:
690 result = extract_unsigned_integer (op_ptr, 2, byte_order);
691 result_val = value_from_ulongest (address_type, result);
692 op_ptr += 2;
693 break;
694 case DW_OP_const2s:
695 result = extract_signed_integer (op_ptr, 2, byte_order);
696 result_val = value_from_ulongest (address_type, result);
697 op_ptr += 2;
698 break;
699 case DW_OP_const4u:
700 result = extract_unsigned_integer (op_ptr, 4, byte_order);
701 result_val = value_from_ulongest (address_type, result);
702 op_ptr += 4;
703 break;
704 case DW_OP_const4s:
705 result = extract_signed_integer (op_ptr, 4, byte_order);
706 result_val = value_from_ulongest (address_type, result);
707 op_ptr += 4;
708 break;
709 case DW_OP_const8u:
710 result = extract_unsigned_integer (op_ptr, 8, byte_order);
711 result_val = value_from_ulongest (address_type, result);
712 op_ptr += 8;
713 break;
714 case DW_OP_const8s:
715 result = extract_signed_integer (op_ptr, 8, byte_order);
716 result_val = value_from_ulongest (address_type, result);
717 op_ptr += 8;
718 break;
719 case DW_OP_constu:
720 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
721 result = uoffset;
722 result_val = value_from_ulongest (address_type, result);
723 break;
724 case DW_OP_consts:
725 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
726 result = offset;
727 result_val = value_from_ulongest (address_type, result);
728 break;
729
730 /* The DW_OP_reg operations are required to occur alone in
731 location expressions. */
732 case DW_OP_reg0:
733 case DW_OP_reg1:
734 case DW_OP_reg2:
735 case DW_OP_reg3:
736 case DW_OP_reg4:
737 case DW_OP_reg5:
738 case DW_OP_reg6:
739 case DW_OP_reg7:
740 case DW_OP_reg8:
741 case DW_OP_reg9:
742 case DW_OP_reg10:
743 case DW_OP_reg11:
744 case DW_OP_reg12:
745 case DW_OP_reg13:
746 case DW_OP_reg14:
747 case DW_OP_reg15:
748 case DW_OP_reg16:
749 case DW_OP_reg17:
750 case DW_OP_reg18:
751 case DW_OP_reg19:
752 case DW_OP_reg20:
753 case DW_OP_reg21:
754 case DW_OP_reg22:
755 case DW_OP_reg23:
756 case DW_OP_reg24:
757 case DW_OP_reg25:
758 case DW_OP_reg26:
759 case DW_OP_reg27:
760 case DW_OP_reg28:
761 case DW_OP_reg29:
762 case DW_OP_reg30:
763 case DW_OP_reg31:
764 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
765
766 result = op - DW_OP_reg0;
767 result_val = value_from_ulongest (address_type, result);
768 this->location = DWARF_VALUE_REGISTER;
769 break;
770
771 case DW_OP_regx:
772 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
773 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
774
775 result = reg;
776 result_val = value_from_ulongest (address_type, result);
777 this->location = DWARF_VALUE_REGISTER;
778 break;
779
780 case DW_OP_implicit_value:
781 {
782 uint64_t len;
783
784 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
785 if (op_ptr + len > op_end)
786 error (_("DW_OP_implicit_value: too few bytes available."));
787 this->len = len;
788 this->data = op_ptr;
789 this->location = DWARF_VALUE_LITERAL;
790 op_ptr += len;
791 dwarf_expr_require_composition (op_ptr, op_end,
792 "DW_OP_implicit_value");
793 }
794 goto no_push;
795
796 case DW_OP_stack_value:
797 this->location = DWARF_VALUE_STACK;
798 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
799 goto no_push;
800
801 case DW_OP_implicit_pointer:
802 case DW_OP_GNU_implicit_pointer:
803 {
804 int64_t len;
805
806 if (this->ref_addr_size == -1)
807 error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
808 "is not allowed in frame context"));
809
810 /* The referred-to DIE of sect_offset kind. */
811 this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
812 byte_order);
813 op_ptr += this->ref_addr_size;
814
815 /* The byte offset into the data. */
816 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
817 result = (ULONGEST) len;
818 result_val = value_from_ulongest (address_type, result);
819
820 this->location = DWARF_VALUE_IMPLICIT_POINTER;
821 dwarf_expr_require_composition (op_ptr, op_end,
822 "DW_OP_implicit_pointer");
823 }
824 break;
825
826 case DW_OP_breg0:
827 case DW_OP_breg1:
828 case DW_OP_breg2:
829 case DW_OP_breg3:
830 case DW_OP_breg4:
831 case DW_OP_breg5:
832 case DW_OP_breg6:
833 case DW_OP_breg7:
834 case DW_OP_breg8:
835 case DW_OP_breg9:
836 case DW_OP_breg10:
837 case DW_OP_breg11:
838 case DW_OP_breg12:
839 case DW_OP_breg13:
840 case DW_OP_breg14:
841 case DW_OP_breg15:
842 case DW_OP_breg16:
843 case DW_OP_breg17:
844 case DW_OP_breg18:
845 case DW_OP_breg19:
846 case DW_OP_breg20:
847 case DW_OP_breg21:
848 case DW_OP_breg22:
849 case DW_OP_breg23:
850 case DW_OP_breg24:
851 case DW_OP_breg25:
852 case DW_OP_breg26:
853 case DW_OP_breg27:
854 case DW_OP_breg28:
855 case DW_OP_breg29:
856 case DW_OP_breg30:
857 case DW_OP_breg31:
858 {
859 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
860 result = this->read_addr_from_reg (op - DW_OP_breg0);
861 result += offset;
862 result_val = value_from_ulongest (address_type, result);
863 }
864 break;
865 case DW_OP_bregx:
866 {
867 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
868 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
869 result = this->read_addr_from_reg (reg);
870 result += offset;
871 result_val = value_from_ulongest (address_type, result);
872 }
873 break;
874 case DW_OP_fbreg:
875 {
876 const gdb_byte *datastart;
877 size_t datalen;
878 unsigned int before_stack_len;
879
880 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
881 /* Rather than create a whole new context, we simply
882 record the stack length before execution, then reset it
883 afterwards, effectively erasing whatever the recursive
884 call put there. */
885 before_stack_len = this->stack_len;
886 /* FIXME: cagney/2003-03-26: This code should be using
887 get_frame_base_address(), and then implement a dwarf2
888 specific this_base method. */
889 this->get_frame_base (&datastart, &datalen);
890 eval (datastart, datalen);
891 if (this->location == DWARF_VALUE_MEMORY)
892 result = fetch_address (0);
893 else if (this->location == DWARF_VALUE_REGISTER)
894 result = this->read_addr_from_reg (value_as_long (fetch (0)));
895 else
896 error (_("Not implemented: computing frame "
897 "base using explicit value operator"));
898 result = result + offset;
899 result_val = value_from_ulongest (address_type, result);
900 in_stack_memory = 1;
901 this->stack_len = before_stack_len;
902 this->location = DWARF_VALUE_MEMORY;
903 }
904 break;
905
906 case DW_OP_dup:
907 result_val = fetch (0);
908 in_stack_memory = fetch_in_stack_memory (0);
909 break;
910
911 case DW_OP_drop:
912 pop ();
913 goto no_push;
914
915 case DW_OP_pick:
916 offset = *op_ptr++;
917 result_val = fetch (offset);
918 in_stack_memory = fetch_in_stack_memory (offset);
919 break;
920
921 case DW_OP_swap:
922 {
923 struct dwarf_stack_value t1, t2;
924
925 if (this->stack_len < 2)
926 error (_("Not enough elements for "
927 "DW_OP_swap. Need 2, have %d."),
928 this->stack_len);
929 t1 = this->stack[this->stack_len - 1];
930 t2 = this->stack[this->stack_len - 2];
931 this->stack[this->stack_len - 1] = t2;
932 this->stack[this->stack_len - 2] = t1;
933 goto no_push;
934 }
935
936 case DW_OP_over:
937 result_val = fetch (1);
938 in_stack_memory = fetch_in_stack_memory (1);
939 break;
940
941 case DW_OP_rot:
942 {
943 struct dwarf_stack_value t1, t2, t3;
944
945 if (this->stack_len < 3)
946 error (_("Not enough elements for "
947 "DW_OP_rot. Need 3, have %d."),
948 this->stack_len);
949 t1 = this->stack[this->stack_len - 1];
950 t2 = this->stack[this->stack_len - 2];
951 t3 = this->stack[this->stack_len - 3];
952 this->stack[this->stack_len - 1] = t2;
953 this->stack[this->stack_len - 2] = t3;
954 this->stack[this->stack_len - 3] = t1;
955 goto no_push;
956 }
957
958 case DW_OP_deref:
959 case DW_OP_deref_size:
960 case DW_OP_deref_type:
961 case DW_OP_GNU_deref_type:
962 {
963 int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
964 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
965 CORE_ADDR addr = fetch_address (0);
966 struct type *type;
967
968 pop ();
969
970 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
971 {
972 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
973 cu_offset type_die_cu_off = (cu_offset) uoffset;
974 type = get_base_type (type_die_cu_off, 0);
975 }
976 else
977 type = address_type;
978
979 this->read_mem (buf, addr, addr_size);
980
981 /* If the size of the object read from memory is different
982 from the type length, we need to zero-extend it. */
983 if (TYPE_LENGTH (type) != addr_size)
984 {
985 ULONGEST result =
986 extract_unsigned_integer (buf, addr_size, byte_order);
987
988 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
989 store_unsigned_integer (buf, TYPE_LENGTH (type),
990 byte_order, result);
991 }
992
993 result_val = value_from_contents_and_address (type, buf, addr);
994 break;
995 }
996
997 case DW_OP_abs:
998 case DW_OP_neg:
999 case DW_OP_not:
1000 case DW_OP_plus_uconst:
1001 {
1002 /* Unary operations. */
1003 result_val = fetch (0);
1004 pop ();
1005
1006 switch (op)
1007 {
1008 case DW_OP_abs:
1009 if (value_less (result_val,
1010 value_zero (value_type (result_val), not_lval)))
1011 result_val = value_neg (result_val);
1012 break;
1013 case DW_OP_neg:
1014 result_val = value_neg (result_val);
1015 break;
1016 case DW_OP_not:
1017 dwarf_require_integral (value_type (result_val));
1018 result_val = value_complement (result_val);
1019 break;
1020 case DW_OP_plus_uconst:
1021 dwarf_require_integral (value_type (result_val));
1022 result = value_as_long (result_val);
1023 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1024 result += reg;
1025 result_val = value_from_ulongest (address_type, result);
1026 break;
1027 }
1028 }
1029 break;
1030
1031 case DW_OP_and:
1032 case DW_OP_div:
1033 case DW_OP_minus:
1034 case DW_OP_mod:
1035 case DW_OP_mul:
1036 case DW_OP_or:
1037 case DW_OP_plus:
1038 case DW_OP_shl:
1039 case DW_OP_shr:
1040 case DW_OP_shra:
1041 case DW_OP_xor:
1042 case DW_OP_le:
1043 case DW_OP_ge:
1044 case DW_OP_eq:
1045 case DW_OP_lt:
1046 case DW_OP_gt:
1047 case DW_OP_ne:
1048 {
1049 /* Binary operations. */
1050 struct value *first, *second;
1051
1052 second = fetch (0);
1053 pop ();
1054
1055 first = fetch (0);
1056 pop ();
1057
1058 if (! base_types_equal_p (value_type (first), value_type (second)))
1059 error (_("Incompatible types on DWARF stack"));
1060
1061 switch (op)
1062 {
1063 case DW_OP_and:
1064 dwarf_require_integral (value_type (first));
1065 dwarf_require_integral (value_type (second));
1066 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1067 break;
1068 case DW_OP_div:
1069 result_val = value_binop (first, second, BINOP_DIV);
1070 break;
1071 case DW_OP_minus:
1072 result_val = value_binop (first, second, BINOP_SUB);
1073 break;
1074 case DW_OP_mod:
1075 {
1076 int cast_back = 0;
1077 struct type *orig_type = value_type (first);
1078
1079 /* We have to special-case "old-style" untyped values
1080 -- these must have mod computed using unsigned
1081 math. */
1082 if (orig_type == address_type)
1083 {
1084 struct type *utype
1085 = get_unsigned_type (this->gdbarch, orig_type);
1086
1087 cast_back = 1;
1088 first = value_cast (utype, first);
1089 second = value_cast (utype, second);
1090 }
1091 /* Note that value_binop doesn't handle float or
1092 decimal float here. This seems unimportant. */
1093 result_val = value_binop (first, second, BINOP_MOD);
1094 if (cast_back)
1095 result_val = value_cast (orig_type, result_val);
1096 }
1097 break;
1098 case DW_OP_mul:
1099 result_val = value_binop (first, second, BINOP_MUL);
1100 break;
1101 case DW_OP_or:
1102 dwarf_require_integral (value_type (first));
1103 dwarf_require_integral (value_type (second));
1104 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1105 break;
1106 case DW_OP_plus:
1107 result_val = value_binop (first, second, BINOP_ADD);
1108 break;
1109 case DW_OP_shl:
1110 dwarf_require_integral (value_type (first));
1111 dwarf_require_integral (value_type (second));
1112 result_val = value_binop (first, second, BINOP_LSH);
1113 break;
1114 case DW_OP_shr:
1115 dwarf_require_integral (value_type (first));
1116 dwarf_require_integral (value_type (second));
1117 if (!TYPE_UNSIGNED (value_type (first)))
1118 {
1119 struct type *utype
1120 = get_unsigned_type (this->gdbarch, value_type (first));
1121
1122 first = value_cast (utype, first);
1123 }
1124
1125 result_val = value_binop (first, second, BINOP_RSH);
1126 /* Make sure we wind up with the same type we started
1127 with. */
1128 if (value_type (result_val) != value_type (second))
1129 result_val = value_cast (value_type (second), result_val);
1130 break;
1131 case DW_OP_shra:
1132 dwarf_require_integral (value_type (first));
1133 dwarf_require_integral (value_type (second));
1134 if (TYPE_UNSIGNED (value_type (first)))
1135 {
1136 struct type *stype
1137 = get_signed_type (this->gdbarch, value_type (first));
1138
1139 first = value_cast (stype, first);
1140 }
1141
1142 result_val = value_binop (first, second, BINOP_RSH);
1143 /* Make sure we wind up with the same type we started
1144 with. */
1145 if (value_type (result_val) != value_type (second))
1146 result_val = value_cast (value_type (second), result_val);
1147 break;
1148 case DW_OP_xor:
1149 dwarf_require_integral (value_type (first));
1150 dwarf_require_integral (value_type (second));
1151 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1152 break;
1153 case DW_OP_le:
1154 /* A <= B is !(B < A). */
1155 result = ! value_less (second, first);
1156 result_val = value_from_ulongest (address_type, result);
1157 break;
1158 case DW_OP_ge:
1159 /* A >= B is !(A < B). */
1160 result = ! value_less (first, second);
1161 result_val = value_from_ulongest (address_type, result);
1162 break;
1163 case DW_OP_eq:
1164 result = value_equal (first, second);
1165 result_val = value_from_ulongest (address_type, result);
1166 break;
1167 case DW_OP_lt:
1168 result = value_less (first, second);
1169 result_val = value_from_ulongest (address_type, result);
1170 break;
1171 case DW_OP_gt:
1172 /* A > B is B < A. */
1173 result = value_less (second, first);
1174 result_val = value_from_ulongest (address_type, result);
1175 break;
1176 case DW_OP_ne:
1177 result = ! value_equal (first, second);
1178 result_val = value_from_ulongest (address_type, result);
1179 break;
1180 default:
1181 internal_error (__FILE__, __LINE__,
1182 _("Can't be reached."));
1183 }
1184 }
1185 break;
1186
1187 case DW_OP_call_frame_cfa:
1188 result = this->get_frame_cfa ();
1189 result_val = value_from_ulongest (address_type, result);
1190 in_stack_memory = 1;
1191 break;
1192
1193 case DW_OP_GNU_push_tls_address:
1194 case DW_OP_form_tls_address:
1195 /* Variable is at a constant offset in the thread-local
1196 storage block into the objfile for the current thread and
1197 the dynamic linker module containing this expression. Here
	 we return the offset from that base.  The top of the
1199 stack has the offset from the beginning of the thread
1200 control block at which the variable is located. Nothing
1201 should follow this operator, so the top of stack would be
1202 returned. */
1203 result = value_as_long (fetch (0));
1204 pop ();
1205 result = this->get_tls_address (result);
1206 result_val = value_from_ulongest (address_type, result);
1207 break;
1208
1209 case DW_OP_skip:
1210 offset = extract_signed_integer (op_ptr, 2, byte_order);
1211 op_ptr += 2;
1212 op_ptr += offset;
1213 goto no_push;
1214
1215 case DW_OP_bra:
1216 {
1217 struct value *val;
1218
1219 offset = extract_signed_integer (op_ptr, 2, byte_order);
1220 op_ptr += 2;
1221 val = fetch (0);
1222 dwarf_require_integral (value_type (val));
1223 if (value_as_long (val) != 0)
1224 op_ptr += offset;
1225 pop ();
1226 }
1227 goto no_push;
1228
1229 case DW_OP_nop:
1230 goto no_push;
1231
1232 case DW_OP_piece:
1233 {
1234 uint64_t size;
1235
1236 /* Record the piece. */
1237 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1238 add_piece (8 * size, 0);
1239
1240 /* Pop off the address/regnum, and reset the location
1241 type. */
1242 if (this->location != DWARF_VALUE_LITERAL
1243 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1244 pop ();
1245 this->location = DWARF_VALUE_MEMORY;
1246 }
1247 goto no_push;
1248
1249 case DW_OP_bit_piece:
1250 {
1251 uint64_t size, offset;
1252
1253 /* Record the piece. */
1254 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1255 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1256 add_piece (size, offset);
1257
1258 /* Pop off the address/regnum, and reset the location
1259 type. */
1260 if (this->location != DWARF_VALUE_LITERAL
1261 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1262 pop ();
1263 this->location = DWARF_VALUE_MEMORY;
1264 }
1265 goto no_push;
1266
1267 case DW_OP_GNU_uninit:
1268 if (op_ptr != op_end)
1269 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1270 "be the very last op."));
1271
1272 this->initialized = 0;
1273 goto no_push;
1274
1275 case DW_OP_call2:
1276 {
1277 cu_offset cu_off
1278 = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
1279 op_ptr += 2;
1280 this->dwarf_call (cu_off);
1281 }
1282 goto no_push;
1283
1284 case DW_OP_call4:
1285 {
1286 cu_offset cu_off
1287 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1288 op_ptr += 4;
1289 this->dwarf_call (cu_off);
1290 }
1291 goto no_push;
1292
1293 case DW_OP_entry_value:
1294 case DW_OP_GNU_entry_value:
1295 {
1296 uint64_t len;
1297 CORE_ADDR deref_size;
1298 union call_site_parameter_u kind_u;
1299
1300 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1301 if (op_ptr + len > op_end)
1302 error (_("DW_OP_entry_value: too few bytes available."));
1303
1304 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1305 if (kind_u.dwarf_reg != -1)
1306 {
1307 op_ptr += len;
1308 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1309 kind_u,
1310 -1 /* deref_size */);
1311 goto no_push;
1312 }
1313
1314 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1315 op_ptr + len,
1316 &deref_size);
1317 if (kind_u.dwarf_reg != -1)
1318 {
1319 if (deref_size == -1)
1320 deref_size = this->addr_size;
1321 op_ptr += len;
1322 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1323 kind_u, deref_size);
1324 goto no_push;
1325 }
1326
1327 error (_("DWARF-2 expression error: DW_OP_entry_value is "
1328 "supported only for single DW_OP_reg* "
1329 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1330 }
1331
1332 case DW_OP_GNU_parameter_ref:
1333 {
1334 union call_site_parameter_u kind_u;
1335
1336 kind_u.param_cu_off
1337 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1338 op_ptr += 4;
1339 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
1340 kind_u,
1341 -1 /* deref_size */);
1342 }
1343 goto no_push;
1344
1345 case DW_OP_const_type:
1346 case DW_OP_GNU_const_type:
1347 {
1348 int n;
1349 const gdb_byte *data;
1350 struct type *type;
1351
1352 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1353 cu_offset type_die_cu_off = (cu_offset) uoffset;
1354
1355 n = *op_ptr++;
1356 data = op_ptr;
1357 op_ptr += n;
1358
1359 type = get_base_type (type_die_cu_off, n);
1360 result_val = value_from_contents (type, data);
1361 }
1362 break;
1363
1364 case DW_OP_regval_type:
1365 case DW_OP_GNU_regval_type:
1366 {
1367 struct type *type;
1368
1369 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1370 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1371 cu_offset type_die_cu_off = (cu_offset) uoffset;
1372
1373 type = get_base_type (type_die_cu_off, 0);
1374 result_val = this->get_reg_value (type, reg);
1375 }
1376 break;
1377
1378 case DW_OP_convert:
1379 case DW_OP_GNU_convert:
1380 case DW_OP_reinterpret:
1381 case DW_OP_GNU_reinterpret:
1382 {
1383 struct type *type;
1384
1385 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1386 cu_offset type_die_cu_off = (cu_offset) uoffset;
1387
1388 if (to_underlying (type_die_cu_off) == 0)
1389 type = address_type;
1390 else
1391 type = get_base_type (type_die_cu_off, 0);
1392
1393 result_val = fetch (0);
1394 pop ();
1395
1396 if (op == DW_OP_convert || op == DW_OP_GNU_convert)
1397 result_val = value_cast (type, result_val);
1398 else if (type == value_type (result_val))
1399 {
1400 /* Nothing. */
1401 }
1402 else if (TYPE_LENGTH (type)
1403 != TYPE_LENGTH (value_type (result_val)))
1404 error (_("DW_OP_reinterpret has wrong size"));
1405 else
1406 result_val
1407 = value_from_contents (type,
1408 value_contents_all (result_val));
1409 }
1410 break;
1411
1412 case DW_OP_push_object_address:
1413 /* Return the address of the object we are currently observing. */
1414 result = this->get_object_address ();
1415 result_val = value_from_ulongest (address_type, result);
1416 break;
1417
1418 default:
1419 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1420 }
1421
1422 /* Most things push a result value. */
1423 gdb_assert (result_val != NULL);
1424 push (result_val, in_stack_memory);
1425 no_push:
1426 ;
1427 }
1428
1429 /* To simplify our main caller, if the result is an implicit
1430 pointer, then make a pieced value. This is ok because we can't
1431 have implicit pointers in contexts where pieces are invalid. */
1432 if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
1433 add_piece (8 * this->addr_size, 0);
1434
1435 abort_expression:
1436 this->recursion_depth--;
1437 gdb_assert (this->recursion_depth >= 0);
1438 }
1439
1440 void
1441 _initialize_dwarf2expr (void)
1442 {
1443 dwarf_arch_cookie
1444 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1445 }
This page took 0.065011 seconds and 5 git commands to generate.