gdb/value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
48 /* Definition of a user function. */
49 struct internal_function
50 {
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
54 char *name;
55
56 /* The handler. */
57 internal_function_fn handler;
58
59 /* User data for the handler. */
60 void *cookie;
61 };
62
63 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
64
65 struct range
66 {
67 /* Lowest offset in the range. */
68 LONGEST offset;
69
70 /* Length of the range. */
71 LONGEST length;
72 };
73
74 typedef struct range range_s;
75
76 DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (LONGEST offset1, LONGEST len1,
83 LONGEST offset2, LONGEST len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
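/* Worked example (illustrative values): with these half-open ranges,
   ranges_overlap (0, 4, 4, 4) is false, since l = max (0, 4) = 4 and
   h = min (0 + 4, 4 + 4) = 4, and 4 < 4 does not hold.  By contrast,
   ranges_overlap (0, 5, 4, 4) is true (l = 4, h = 5).  */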
91
92 /* Returns true if the first argument is strictly less than the
93 second, useful for VEC_lower_bound. We keep ranges sorted by
94 offset and coalesce overlapping and contiguous ranges, so this just
95 compares the starting offset. */
96
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100 return r1->offset < r2->offset;
101 }
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
108 {
109 range_s what;
110 LONGEST i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range after all), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we need to check whether the range at position I itself
136 overlaps R. E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
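/* Worked example (illustrative values): suppose RANGES holds the two
   ranges [0, 4) and [10, 16), i.e. {offset 0, length 4} and
   {offset 10, length 6}.  Then:

     ranges_contain (ranges, 2, 1) => 1   (overlaps [0, 4))
     ranges_contain (ranges, 4, 6) => 0   ([4, 10) falls in the gap)
     ranges_contain (ranges, 9, 2) => 1   (overlaps [10, 16))  */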
166
167 static struct cmd_list_element *functionlist;
168
169 /* Note that the fields in this structure are arranged to save a bit
170 of memory. */
171
172 struct value
173 {
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
176 enum lval_type lval;
177
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 unsigned int modifiable : 1;
180
181 /* If zero, contents of this value are in the contents field. If
182 nonzero, contents are in the inferior. If the lval field is lval_memory,
183 the contents are in inferior memory at location.address plus offset.
184 The lval field may also be lval_register.
185
186 WARNING: This field is used by the code which handles watchpoints
187 (see breakpoint.c) to decide whether a particular value can be
188 watched by hardware watchpoints. If the lazy flag is set for
189 some member of a value chain, it is assumed that this member of
190 the chain doesn't need to be watched as part of watching the
191 value itself. This is how GDB avoids watching the entire struct
192 or array when the user wants to watch a single struct member or
193 array element. If you ever change the way lazy flag is set and
194 reset, be sure to consider this use as well! */
195 unsigned int lazy : 1;
196
197 /* If value is a variable, is it initialized or not. */
198 unsigned int initialized : 1;
199
200 /* If value is from the stack. If this is set, read_stack will be
201 used instead of read_memory to enable extra caching. */
202 unsigned int stack : 1;
203
204 /* If the value has been released. */
205 unsigned int released : 1;
206
207 /* Register number if the value is from a register. */
208 short regnum;
209
210 /* Location of value (if lval). */
211 union
212 {
213 /* If lval == lval_memory, this is the address in the inferior.
214 If lval == lval_register, this is the byte offset into the
215 registers structure. */
216 CORE_ADDR address;
217
218 /* Pointer to internal variable. */
219 struct internalvar *internalvar;
220
221 /* Pointer to xmethod worker. */
222 struct xmethod_worker *xm_worker;
223
224 /* If lval == lval_computed, this is a set of function pointers
225 to use to access and describe the value, and a closure pointer
226 for them to use. */
227 struct
228 {
229 /* Functions to call. */
230 const struct lval_funcs *funcs;
231
232 /* Closure for those functions to use. */
233 void *closure;
234 } computed;
235 } location;
236
237 /* Describes offset of a value within lval of a structure in target
238 addressable memory units. If lval == lval_memory, this is an offset to
239 the address. If lval == lval_register, this is a further offset from
240 location.address within the registers structure. Note also the member
241 embedded_offset below. */
242 LONGEST offset;
243
244 /* Only used for bitfields; number of bits contained in them. */
245 LONGEST bitsize;
246
247 /* Only used for bitfields; position of start of field. For
248 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
249 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
250 LONGEST bitpos;
251
252 /* The number of references to this value. When a value is created,
253 the value chain holds a reference, so REFERENCE_COUNT is 1. If
254 release_value is called, this value is removed from the chain but
255 the caller of release_value now has a reference to this value.
256 The caller must arrange for a call to value_free later. */
257 int reference_count;
258
259 /* Only used for bitfields; the containing value. This allows a
260 single read from the target when displaying multiple
261 bitfields. */
262 struct value *parent;
263
264 /* Frame register value is relative to. This will be described in
265 the lval enum above as "lval_register". */
266 struct frame_id frame_id;
267
268 /* Type of the value. */
269 struct type *type;
270
271 /* If a value represents a C++ object, then the `type' field gives
272 the object's compile-time type. If the object actually belongs
273 to some class derived from `type', perhaps with other base
274 classes and additional members, then `type' is just a subobject
275 of the real thing, and the full object is probably larger than
276 `type' would suggest.
277
278 If `type' is a dynamic class (i.e. one with a vtable), then GDB
279 can actually determine the object's run-time type by looking at
280 the run-time type information in the vtable. When this
281 information is available, we may elect to read in the entire
282 object, for several reasons:
283
284 - When printing the value, the user would probably rather see the
285 full object, not just the limited portion apparent from the
286 compile-time type.
287
288 - If `type' has virtual base classes, then even printing `type'
289 alone may require reaching outside the `type' portion of the
290 object to wherever the virtual base class has been stored.
291
292 When we store the entire object, `enclosing_type' is the run-time
293 type -- the complete object -- and `embedded_offset' is the
294 offset of `type' within that larger type, in target addressable memory
295 units. The value_contents() macro takes `embedded_offset' into account,
296 so most GDB code continues to see the `type' portion of the value, just
297 as the inferior would.
298
299 If `type' is a pointer to an object, then `enclosing_type' is a
300 pointer to the object's run-time type, and `pointed_to_offset' is
301 the offset in target addressable memory units from the full object
302 to the pointed-to object -- that is, the value `embedded_offset' would
303 have if we followed the pointer and fetched the complete object.
304 (I don't really see the point. Why not just determine the
305 run-time type when you indirect, and avoid the special case? The
306 contents don't matter until you indirect anyway.)
307
308 If we're not doing anything fancy, `enclosing_type' is equal to
309 `type', and `embedded_offset' is zero, so everything works
310 normally. */
311 struct type *enclosing_type;
312 LONGEST embedded_offset;
313 LONGEST pointed_to_offset;
314
315 /* Values are stored in a chain, so that they can be deleted easily
316 over calls to the inferior. Values assigned to internal
317 variables, put into the value history or exposed to Python are
318 taken off this list. */
319 struct value *next;
320
321 /* Actual contents of the value. Target byte-order. NULL or not
322 valid if lazy is nonzero. */
323 gdb_byte *contents;
324
325 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
326 rather than available, since the common and default case is for a
327 value to be available. This is filled in at value read time.
328 The unavailable ranges are tracked in bits. Note that a contents
329 bit that has been optimized out doesn't really exist in the
330 program, so it can't be marked unavailable either. */
331 VEC(range_s) *unavailable;
332
333 /* Likewise, but for optimized out contents (a chunk of the value of
334 a variable that does not actually exist in the program). If LVAL
335 is lval_register, this is a register ($pc, $sp, etc., never a
336 program variable) that has not been saved in the frame. Not
337 saved registers and optimized-out program variables values are
338 treated pretty much the same, except not-saved registers have a
339 different string representation and related error strings. */
340 VEC(range_s) *optimized_out;
341 };
342
343 /* See value.h. */
344
345 struct gdbarch *
346 get_value_arch (const struct value *value)
347 {
348 return get_type_arch (value_type (value));
349 }
350
351 int
352 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value,
361 LONGEST offset, LONGEST length)
362 {
363 return value_bits_available (value,
364 offset * TARGET_CHAR_BIT,
365 length * TARGET_CHAR_BIT);
366 }
367
368 int
369 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
370 {
371 gdb_assert (!value->lazy);
372
373 return ranges_contain (value->optimized_out, bit_offset, bit_length);
374 }
375
376 int
377 value_entirely_available (struct value *value)
378 {
379 /* We can only tell whether the whole value is available when we try
380 to read it. */
381 if (value->lazy)
382 value_fetch_lazy (value);
383
384 if (VEC_empty (range_s, value->unavailable))
385 return 1;
386 return 0;
387 }
388
389 /* Returns true if VALUE is entirely covered by RANGES. If the value
390 is lazy, it'll be read now. Note that RANGES is a pointer to
391 pointer because reading the value might change *RANGES. */
392
393 static int
394 value_entirely_covered_by_range_vector (struct value *value,
395 VEC(range_s) **ranges)
396 {
397 /* We can only tell whether the whole value is optimized out /
398 unavailable when we try to read it. */
399 if (value->lazy)
400 value_fetch_lazy (value);
401
402 if (VEC_length (range_s, *ranges) == 1)
403 {
404 struct range *t = VEC_index (range_s, *ranges, 0);
405
406 if (t->offset == 0
407 && t->length == (TARGET_CHAR_BIT
408 * TYPE_LENGTH (value_enclosing_type (value))))
409 return 1;
410 }
411
412 return 0;
413 }
414
415 int
416 value_entirely_unavailable (struct value *value)
417 {
418 return value_entirely_covered_by_range_vector (value, &value->unavailable);
419 }
420
421 int
422 value_entirely_optimized_out (struct value *value)
423 {
424 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
425 }
426
427 /* Insert into the vector pointed to by VECTORP the bit range starting at
428 OFFSET bits, and extending for the next LENGTH bits. */
429
430 static void
431 insert_into_bit_range_vector (VEC(range_s) **vectorp,
432 LONGEST offset, LONGEST length)
433 {
434 range_s newr;
435 int i;
436
437 /* Insert the range sorted. If there's overlap or the new range
438 would be contiguous with an existing range, merge. */
439
440 newr.offset = offset;
441 newr.length = length;
442
443 /* Do a binary search for the position the given range would be
444 inserted if we only considered the starting OFFSET of ranges.
445 Call that position I. Since we also have LENGTH to care for
446 (this is a range after all), we need to check if the _previous_
447 range overlaps the I range. E.g., calling R the new range:
448
449 #1 - overlaps with previous
450
451 R
452 |-...-|
453 |---| |---| |------| ... |--|
454 0 1 2 N
455
456 I=1
457
458 In the case #1 above, the binary search would return `I=1',
459 meaning, this OFFSET should be inserted at position 1, and the
460 current position 1 should be pushed further (and become 2). But,
461 note that `0' overlaps with R, so we want to merge them.
462
463 A similar consideration needs to be taken if the new range would
464 be contiguous with the previous range:
465
466 #2 - contiguous with previous
467
468 R
469 |-...-|
470 |--| |---| |------| ... |--|
471 0 1 2 N
472
473 I=1
474
475 If there's no overlap with the previous range, as in:
476
477 #3 - not overlapping and not contiguous
478
479 R
480 |-...-|
481 |--| |---| |------| ... |--|
482 0 1 2 N
483
484 I=1
485
486 or if I is 0:
487
488 #4 - R is the range with lowest offset
489
490 R
491 |-...-|
492 |--| |---| |------| ... |--|
493 0 1 2 N
494
495 I=0
496
497 ... we just push the new range to I.
498
499 All the 4 cases above need to consider that the new range may
500 also overlap several of the ranges that follow, or that R may be
501 contiguous with the following range, and merge. E.g.,
502
503 #5 - overlapping following ranges
504
505 R
506 |------------------------|
507 |--| |---| |------| ... |--|
508 0 1 2 N
509
510 I=0
511
512 or:
513
514 R
515 |-------|
516 |--| |---| |------| ... |--|
517 0 1 2 N
518
519 I=1
520
521 */
522
523 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
524 if (i > 0)
525 {
526 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
527
528 if (ranges_overlap (bef->offset, bef->length, offset, length))
529 {
530 /* #1 */
531 ULONGEST l = min (bef->offset, offset);
532 ULONGEST h = max (bef->offset + bef->length, offset + length);
533
534 bef->offset = l;
535 bef->length = h - l;
536 i--;
537 }
538 else if (offset == bef->offset + bef->length)
539 {
540 /* #2 */
541 bef->length += length;
542 i--;
543 }
544 else
545 {
546 /* #3 */
547 VEC_safe_insert (range_s, *vectorp, i, &newr);
548 }
549 }
550 else
551 {
552 /* #4 */
553 VEC_safe_insert (range_s, *vectorp, i, &newr);
554 }
555
556 /* Check whether the ranges following the one we've just added or
557 touched can be folded in (#5 above). */
558 if (i + 1 < VEC_length (range_s, *vectorp))
559 {
560 struct range *t;
561 struct range *r;
562 int removed = 0;
563 int next = i + 1;
564
565 /* Get the range we just touched. */
566 t = VEC_index (range_s, *vectorp, i);
567 removed = 0;
568
569 i = next;
570 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
571 if (r->offset <= t->offset + t->length)
572 {
573 ULONGEST l, h;
574
575 l = min (t->offset, r->offset);
576 h = max (t->offset + t->length, r->offset + r->length);
577
578 t->offset = l;
579 t->length = h - l;
580
581 removed++;
582 }
583 else
584 {
585 /* If we couldn't merge this one, we won't be able to
586 merge following ones either, since the ranges are
587 always sorted by OFFSET. */
588 break;
589 }
590
591 if (removed != 0)
592 VEC_block_remove (range_s, *vectorp, next, removed);
593 }
594 }
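/* Worked example (illustrative values), starting from an empty vector V:

     insert_into_bit_range_vector (&v, 4, 4);   /* v = { [4, 8) }  */
     insert_into_bit_range_vector (&v, 8, 4);   /* contiguous (#2): v = { [4, 12) }  */
     insert_into_bit_range_vector (&v, 20, 2);  /* disjoint (#3): v = { [4, 12), [20, 22) }  */
     insert_into_bit_range_vector (&v, 10, 12); /* overlaps (#1, then folds #5): v = { [4, 22) }  */
*/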
595
596 void
597 mark_value_bits_unavailable (struct value *value,
598 LONGEST offset, LONGEST length)
599 {
600 insert_into_bit_range_vector (&value->unavailable, offset, length);
601 }
602
603 void
604 mark_value_bytes_unavailable (struct value *value,
605 LONGEST offset, LONGEST length)
606 {
607 mark_value_bits_unavailable (value,
608 offset * TARGET_CHAR_BIT,
609 length * TARGET_CHAR_BIT);
610 }
611
612 /* Find the first range in RANGES that overlaps the range defined by
613 OFFSET and LENGTH, starting at element POS in the RANGES vector.
614 Returns the index into RANGES where such an overlapping range was
615 found, or -1 if none was found. */
616
617 static int
618 find_first_range_overlap (VEC(range_s) *ranges, int pos,
619 LONGEST offset, LONGEST length)
620 {
621 range_s *r;
622 int i;
623
624 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
625 if (ranges_overlap (r->offset, r->length, offset, length))
626 return i;
627
628 return -1;
629 }
630
631 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
632 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
633 return non-zero.
634
635 It must always be the case that:
636 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
637
638 It is assumed that memory can be accessed from:
639 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
640 to:
641 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
642 / TARGET_CHAR_BIT) */
643 static int
644 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
645 const gdb_byte *ptr2, size_t offset2_bits,
646 size_t length_bits)
647 {
648 gdb_assert (offset1_bits % TARGET_CHAR_BIT
649 == offset2_bits % TARGET_CHAR_BIT);
650
651 if (offset1_bits % TARGET_CHAR_BIT != 0)
652 {
653 size_t bits;
654 gdb_byte mask, b1, b2;
655
656 /* The offset from the base pointers PTR1 and PTR2 is not a complete
657 number of bytes. A number of bits up to either the next exact
658 byte boundary, or LENGTH_BITS (whichever is sooner) will be
659 compared. */
660 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
661 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
662 mask = (1 << bits) - 1;
663
664 if (length_bits < bits)
665 {
666 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
667 bits = length_bits;
668 }
669
670 /* Now load the two bytes and mask off the bits we care about. */
671 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
672 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
673
674 if (b1 != b2)
675 return 1;
676
677 /* Now update the length and offsets to take account of the bits
678 we've just compared. */
679 length_bits -= bits;
680 offset1_bits += bits;
681 offset2_bits += bits;
682 }
683
684 if (length_bits % TARGET_CHAR_BIT != 0)
685 {
686 size_t bits;
687 size_t o1, o2;
688 gdb_byte mask, b1, b2;
689
690 /* The length is not an exact number of bytes. After the previous
691 if-block, the offsets are byte aligned, or the
692 length is zero (in which case this code is not reached). Compare
693 a number of bits at the end of the region, starting from an exact
694 byte boundary. */
695 bits = length_bits % TARGET_CHAR_BIT;
696 o1 = offset1_bits + length_bits - bits;
697 o2 = offset2_bits + length_bits - bits;
698
699 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
700 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
701
702 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
703 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
704
705 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
706 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
707
708 if (b1 != b2)
709 return 1;
710
711 length_bits -= bits;
712 }
713
714 if (length_bits > 0)
715 {
716 /* We've now taken care of any stray "bits" at the start, or end of
717 the region to compare, the remainder can be covered with a simple
718 memcmp. */
719 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
720 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
721 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
722
723 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
724 ptr2 + offset2_bits / TARGET_CHAR_BIT,
725 length_bits / TARGET_CHAR_BIT);
726 }
727
728 /* Length is zero, regions match. */
729 return 0;
730 }
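/* Worked example (illustrative values): comparing LENGTH_BITS == 12 bits
   starting at bit offset 3 in both buffers.  The first block above
   compares the remaining 5 bits of byte 0 under mask (1 << 5) - 1 == 0x1f,
   leaving 7 bits that are now byte aligned.  The second block compares
   those 7 bits of byte 1 under mask ((1 << 7) - 1) << 1 == 0xfe, and the
   final memcmp is skipped because no whole bytes remain.  */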
731
732 /* Helper struct for find_first_range_overlap_and_match and
733 value_contents_bits_eq. Keep track of which slot of a given ranges
734 vector we last looked at. */
735
736 struct ranges_and_idx
737 {
738 /* The ranges. */
739 VEC(range_s) *ranges;
740
741 /* The range we've last found in RANGES. Given ranges are sorted,
742 we can start the next lookup here. */
743 int idx;
744 };
745
746 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
747 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
748 ranges starting at OFFSET2 bits. Return true if the ranges match
749 and fill in *L and *H with the overlapping window relative to
750 (both) OFFSET1 or OFFSET2. */
751
752 static int
753 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
754 struct ranges_and_idx *rp2,
755 LONGEST offset1, LONGEST offset2,
756 LONGEST length, ULONGEST *l, ULONGEST *h)
757 {
758 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
759 offset1, length);
760 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
761 offset2, length);
762
763 if (rp1->idx == -1 && rp2->idx == -1)
764 {
765 *l = length;
766 *h = length;
767 return 1;
768 }
769 else if (rp1->idx == -1 || rp2->idx == -1)
770 return 0;
771 else
772 {
773 range_s *r1, *r2;
774 ULONGEST l1, h1;
775 ULONGEST l2, h2;
776
777 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
778 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
779
780 /* Get the unavailable windows intersected by the incoming
781 ranges. The first and last ranges that overlap the argument
782 range may be wider than the incoming argument range. */
783 l1 = max (offset1, r1->offset);
784 h1 = min (offset1 + length, r1->offset + r1->length);
785
786 l2 = max (offset2, r2->offset);
787 h2 = min (offset2 + length, r2->offset + r2->length);
788
789 /* Make them relative to the respective start offsets, so we can
790 compare them for equality. */
791 l1 -= offset1;
792 h1 -= offset1;
793
794 l2 -= offset2;
795 h2 -= offset2;
796
797 /* Different ranges, no match. */
798 if (l1 != l2 || h1 != h2)
799 return 0;
800
801 *h = h1;
802 *l = l1;
803 return 1;
804 }
805 }
806
807 /* Helper function for value_contents_eq. The only difference is that
808 this function is bit rather than byte based.
809
810 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
811 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
812 Return true if the available bits match. */
813
814 static int
815 value_contents_bits_eq (const struct value *val1, int offset1,
816 const struct value *val2, int offset2,
817 int length)
818 {
819 /* Each array element corresponds to a ranges source (unavailable,
820 optimized out). '1' is for VAL1, '2' for VAL2. */
821 struct ranges_and_idx rp1[2], rp2[2];
822
823 /* See function description in value.h. */
824 gdb_assert (!val1->lazy && !val2->lazy);
825
826 /* We shouldn't be trying to compare past the end of the values. */
827 gdb_assert (offset1 + length
828 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
829 gdb_assert (offset2 + length
830 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
831
832 memset (&rp1, 0, sizeof (rp1));
833 memset (&rp2, 0, sizeof (rp2));
834 rp1[0].ranges = val1->unavailable;
835 rp2[0].ranges = val2->unavailable;
836 rp1[1].ranges = val1->optimized_out;
837 rp2[1].ranges = val2->optimized_out;
838
839 while (length > 0)
840 {
841 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
842 int i;
843
844 for (i = 0; i < 2; i++)
845 {
846 ULONGEST l_tmp, h_tmp;
847
848 /* The contents only compare equal if the invalid/unavailable
849 contents ranges match as well. */
850 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
851 offset1, offset2, length,
852 &l_tmp, &h_tmp))
853 return 0;
854
855 /* We're interested in the lowest/first range found. */
856 if (i == 0 || l_tmp < l)
857 {
858 l = l_tmp;
859 h = h_tmp;
860 }
861 }
862
863 /* Compare the available/valid contents. */
864 if (memcmp_with_bit_offsets (val1->contents, offset1,
865 val2->contents, offset2, l) != 0)
866 return 0;
867
868 length -= h;
869 offset1 += h;
870 offset2 += h;
871 }
872
873 return 1;
874 }
875
876 int
877 value_contents_eq (const struct value *val1, LONGEST offset1,
878 const struct value *val2, LONGEST offset2,
879 LONGEST length)
880 {
881 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
882 val2, offset2 * TARGET_CHAR_BIT,
883 length * TARGET_CHAR_BIT);
884 }
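/* Illustrative note: two values compare equal here only if both their
   valid contents and their unavailable / optimized-out ranges line up.
   E.g., given two non-lazy 8-byte values that each have only their first
   4 bytes available, and those bytes are identical,
   value_contents_eq (val1, 0, val2, 0, 8) returns 1; if instead only one
   of them has bytes 4-7 unavailable, it returns 0 even when the
   available bytes match.  */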
885
886 /* Prototypes for local functions. */
887
888 static void show_values (char *, int);
889
890 static void show_convenience (char *, int);
891
892
893 /* The value-history records all the values printed
894 by print commands during this session. Each chunk
895 records 60 consecutive values. The first chunk on
896 the chain records the most recent values.
897 The total number of values is in value_history_count. */
898
899 #define VALUE_HISTORY_CHUNK 60
900
901 struct value_history_chunk
902 {
903 struct value_history_chunk *next;
904 struct value *values[VALUE_HISTORY_CHUNK];
905 };
906
907 /* Chain of chunks now in use. */
908
909 static struct value_history_chunk *value_history_chain;
910
911 static int value_history_count; /* Abs number of last entry stored. */
912
913 \f
914 /* List of all value objects currently allocated
915 (except for those released by calls to release_value).
916 This is so they can be freed after each command. */
917
918 static struct value *all_values;
919
920 /* Allocate a lazy value for type TYPE. Its actual content is
921 "lazily" allocated too: the content field of the return value is
922 NULL; it will be allocated when it is fetched from the target. */
923
924 struct value *
925 allocate_value_lazy (struct type *type)
926 {
927 struct value *val;
928
929 /* Call check_typedef on our type to make sure that, if TYPE
930 is a TYPE_CODE_TYPEDEF, its length is set to the length
931 of the target type instead of zero. However, we do not
932 replace the typedef type by the target type, because we want
933 to keep the typedef in order to be able to set the VAL's type
934 description correctly. */
935 check_typedef (type);
936
937 val = XCNEW (struct value);
938 val->contents = NULL;
939 val->next = all_values;
940 all_values = val;
941 val->type = type;
942 val->enclosing_type = type;
943 VALUE_LVAL (val) = not_lval;
944 val->location.address = 0;
945 VALUE_FRAME_ID (val) = null_frame_id;
946 val->offset = 0;
947 val->bitpos = 0;
948 val->bitsize = 0;
949 VALUE_REGNUM (val) = -1;
950 val->lazy = 1;
951 val->embedded_offset = 0;
952 val->pointed_to_offset = 0;
953 val->modifiable = 1;
954 val->initialized = 1; /* Default to initialized. */
955
956 /* Values start out on the all_values chain. */
957 val->reference_count = 1;
958
959 return val;
960 }
961
962 /* The maximum size, in bytes, that GDB will try to allocate for a value.
963 The initial value of 64k was not selected for any specific reason; it is
964 just a reasonable starting point. */
965
966 static int max_value_size = 65536; /* 64k bytes */
967
968 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
969 LONGEST, otherwise GDB will not be able to parse integer values from the
970 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
971 be unable to parse "set max-value-size 2".
972
973 As we want a consistent GDB experience across hosts with different sizes
974 of LONGEST, this arbitrary minimum value was selected; so long as it
975 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
976
977 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
978 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
979
980 /* Implement the "set max-value-size" command. */
981
982 static void
983 set_max_value_size (char *args, int from_tty,
984 struct cmd_list_element *c)
985 {
986 gdb_assert (max_value_size == -1 || max_value_size >= 0);
987
988 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
989 {
990 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
991 error (_("max-value-size set too low, increasing to %d bytes"),
992 max_value_size);
993 }
994 }
995
996 /* Implement the "show max-value-size" command. */
997
998 static void
999 show_max_value_size (struct ui_file *file, int from_tty,
1000 struct cmd_list_element *c, const char *value)
1001 {
1002 if (max_value_size == -1)
1003 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
1004 else
1005 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
1006 max_value_size);
1007 }
1008
1009 /* Called before we attempt to allocate or reallocate a buffer for the
1010 contents of a value. TYPE is the type of the value for which we are
1011 allocating the buffer. If the buffer is too large (based on the user
1012 controllable setting) then throw an error. If this function returns
1013 then we should attempt to allocate the buffer. */
1014
1015 static void
1016 check_type_length_before_alloc (const struct type *type)
1017 {
1018 unsigned int length = TYPE_LENGTH (type);
1019
1020 if (max_value_size > -1 && length > max_value_size)
1021 {
1022 if (TYPE_NAME (type) != NULL)
1023 error (_("value of type `%s' requires %u bytes, which is more "
1024 "than max-value-size"), TYPE_NAME (type), length);
1025 else
1026 error (_("value requires %u bytes, which is more than "
1027 "max-value-size"), length);
1028 }
1029 }
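/* Illustrative effect of the setting: with "set max-value-size 1024" in
   effect, allocating a value of a hypothetical 4000-byte type `big_t'
   fails with "value of type `big_t' requires 4000 bytes, which is more
   than max-value-size", while a value of exactly 1024 bytes or less is
   allocated normally (the check is strictly greater-than).  */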
1030
1031 /* Allocate the contents of VAL if it has not been allocated yet. */
1032
1033 static void
1034 allocate_value_contents (struct value *val)
1035 {
1036 if (!val->contents)
1037 {
1038 check_type_length_before_alloc (val->enclosing_type);
1039 val->contents
1040 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1041 }
1042 }
1043
1044 /* Allocate a value and its contents for type TYPE. */
1045
1046 struct value *
1047 allocate_value (struct type *type)
1048 {
1049 struct value *val = allocate_value_lazy (type);
1050
1051 allocate_value_contents (val);
1052 val->lazy = 0;
1053 return val;
1054 }
1055
1056 /* Allocate a value that has the correct length
1057 for COUNT repetitions of type TYPE. */
1058
1059 struct value *
1060 allocate_repeat_value (struct type *type, int count)
1061 {
1062 int low_bound = current_language->string_lower_bound; /* ??? */
1063 /* FIXME-type-allocation: need a way to free this type when we are
1064 done with it. */
1065 struct type *array_type
1066 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1067
1068 return allocate_value (array_type);
1069 }
1070
1071 struct value *
1072 allocate_computed_value (struct type *type,
1073 const struct lval_funcs *funcs,
1074 void *closure)
1075 {
1076 struct value *v = allocate_value_lazy (type);
1077
1078 VALUE_LVAL (v) = lval_computed;
1079 v->location.computed.funcs = funcs;
1080 v->location.computed.closure = closure;
1081
1082 return v;
1083 }
1084
1085 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1086
1087 struct value *
1088 allocate_optimized_out_value (struct type *type)
1089 {
1090 struct value *retval = allocate_value_lazy (type);
1091
1092 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1093 set_value_lazy (retval, 0);
1094 return retval;
1095 }
1096
1097 /* Accessor methods. */
1098
1099 struct value *
1100 value_next (const struct value *value)
1101 {
1102 return value->next;
1103 }
1104
1105 struct type *
1106 value_type (const struct value *value)
1107 {
1108 return value->type;
1109 }
1110 void
1111 deprecated_set_value_type (struct value *value, struct type *type)
1112 {
1113 value->type = type;
1114 }
1115
1116 LONGEST
1117 value_offset (const struct value *value)
1118 {
1119 return value->offset;
1120 }
1121 void
1122 set_value_offset (struct value *value, LONGEST offset)
1123 {
1124 value->offset = offset;
1125 }
1126
1127 LONGEST
1128 value_bitpos (const struct value *value)
1129 {
1130 return value->bitpos;
1131 }
1132 void
1133 set_value_bitpos (struct value *value, LONGEST bit)
1134 {
1135 value->bitpos = bit;
1136 }
1137
1138 LONGEST
1139 value_bitsize (const struct value *value)
1140 {
1141 return value->bitsize;
1142 }
1143 void
1144 set_value_bitsize (struct value *value, LONGEST bit)
1145 {
1146 value->bitsize = bit;
1147 }
1148
1149 struct value *
1150 value_parent (const struct value *value)
1151 {
1152 return value->parent;
1153 }
1154
1155 /* See value.h. */
1156
1157 void
1158 set_value_parent (struct value *value, struct value *parent)
1159 {
1160 struct value *old = value->parent;
1161
1162 value->parent = parent;
1163 if (parent != NULL)
1164 value_incref (parent);
1165 value_free (old);
1166 }
1167
1168 gdb_byte *
1169 value_contents_raw (struct value *value)
1170 {
1171 struct gdbarch *arch = get_value_arch (value);
1172 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1173
1174 allocate_value_contents (value);
1175 return value->contents + value->embedded_offset * unit_size;
1176 }
1177
1178 gdb_byte *
1179 value_contents_all_raw (struct value *value)
1180 {
1181 allocate_value_contents (value);
1182 return value->contents;
1183 }
1184
1185 struct type *
1186 value_enclosing_type (const struct value *value)
1187 {
1188 return value->enclosing_type;
1189 }
1190
1191 /* Look at value.h for description. */
1192
1193 struct type *
1194 value_actual_type (struct value *value, int resolve_simple_types,
1195 int *real_type_found)
1196 {
1197 struct value_print_options opts;
1198 struct type *result;
1199
1200 get_user_print_options (&opts);
1201
1202 if (real_type_found)
1203 *real_type_found = 0;
1204 result = value_type (value);
1205 if (opts.objectprint)
1206 {
1207 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1208 fetch its rtti type. */
1209 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1210 || TYPE_CODE (result) == TYPE_CODE_REF)
1211 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1212 == TYPE_CODE_STRUCT
1213 && !value_optimized_out (value))
1214 {
1215 struct type *real_type;
1216
1217 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1218 if (real_type)
1219 {
1220 if (real_type_found)
1221 *real_type_found = 1;
1222 result = real_type;
1223 }
1224 }
1225 else if (resolve_simple_types)
1226 {
1227 if (real_type_found)
1228 *real_type_found = 1;
1229 result = value_enclosing_type (value);
1230 }
1231 }
1232
1233 return result;
1234 }
1235
1236 void
1237 error_value_optimized_out (void)
1238 {
1239 error (_("value has been optimized out"));
1240 }
1241
1242 static void
1243 require_not_optimized_out (const struct value *value)
1244 {
1245 if (!VEC_empty (range_s, value->optimized_out))
1246 {
1247 if (value->lval == lval_register)
1248 error (_("register has not been saved in frame"));
1249 else
1250 error_value_optimized_out ();
1251 }
1252 }
1253
1254 static void
1255 require_available (const struct value *value)
1256 {
1257 if (!VEC_empty (range_s, value->unavailable))
1258 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1259 }
1260
1261 const gdb_byte *
1262 value_contents_for_printing (struct value *value)
1263 {
1264 if (value->lazy)
1265 value_fetch_lazy (value);
1266 return value->contents;
1267 }
1268
1269 const gdb_byte *
1270 value_contents_for_printing_const (const struct value *value)
1271 {
1272 gdb_assert (!value->lazy);
1273 return value->contents;
1274 }
1275
1276 const gdb_byte *
1277 value_contents_all (struct value *value)
1278 {
1279 const gdb_byte *result = value_contents_for_printing (value);
1280 require_not_optimized_out (value);
1281 require_available (value);
1282 return result;
1283 }
1284
1285 /* Copy the ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1286 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1287
1288 static void
1289 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1290 VEC (range_s) *src_range, int src_bit_offset,
1291 int bit_length)
1292 {
1293 range_s *r;
1294 int i;
1295
1296 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1297 {
1298 ULONGEST h, l;
1299
1300 l = max (r->offset, src_bit_offset);
1301 h = min (r->offset + r->length, src_bit_offset + bit_length);
1302
1303 if (l < h)
1304 insert_into_bit_range_vector (dst_range,
1305 dst_bit_offset + (l - src_bit_offset),
1306 h - l);
1307 }
1308 }
1309
1310 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1311 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1312
1313 static void
1314 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1315 const struct value *src, int src_bit_offset,
1316 int bit_length)
1317 {
1318 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1319 src->unavailable, src_bit_offset,
1320 bit_length);
1321 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1322 src->optimized_out, src_bit_offset,
1323 bit_length);
1324 }
1325
1326 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1327 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1328 contents, starting at DST_OFFSET. If unavailable contents are
1329 being copied from SRC, the corresponding DST contents are marked
1330 unavailable accordingly. Neither DST nor SRC may be lazy
1331 values.
1332
1333 It is assumed the contents of DST in the [DST_OFFSET,
1334 DST_OFFSET+LENGTH) range are wholly available. */
1335
1336 void
1337 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1338 struct value *src, LONGEST src_offset, LONGEST length)
1339 {
1340 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1341 struct gdbarch *arch = get_value_arch (src);
1342 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1343
1344 /* A lazy DST would make this copy operation useless, since as
1345 soon as DST's contents were un-lazied (by a later value_contents
1346 call, say), the contents would be overwritten. A lazy SRC would
1347 mean we'd be copying garbage. */
1348 gdb_assert (!dst->lazy && !src->lazy);
1349
1350 /* The overwritten DST range gets unavailability ORed in, not
1351 replaced. Make sure to remember to implement replacing if it
1352 turns out actually necessary. */
1353 gdb_assert (value_bytes_available (dst, dst_offset, length));
1354 gdb_assert (!value_bits_any_optimized_out (dst,
1355 TARGET_CHAR_BIT * dst_offset,
1356 TARGET_CHAR_BIT * length));
1357
1358 /* Copy the data. */
1359 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1360 value_contents_all_raw (src) + src_offset * unit_size,
1361 length * unit_size);
1362
1363 /* Copy the meta-data, adjusted. */
1364 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1365 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1366 bit_length = length * unit_size * HOST_CHAR_BIT;
1367
1368 value_ranges_copy_adjusted (dst, dst_bit_offset,
1369 src, src_bit_offset,
1370 bit_length);
1371 }
1372
1373 /* Copy LENGTH bytes of SRC value's (all) contents
1374 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1375 (all) contents, starting at DST_OFFSET. If unavailable contents
1376 are being copied from SRC, the corresponding DST contents are
1377 marked unavailable accordingly. DST must not be lazy. If SRC is
1378 lazy, it will be fetched now.
1379
1380 It is assumed the contents of DST in the [DST_OFFSET,
1381 DST_OFFSET+LENGTH) range are wholly available. */
1382
1383 void
1384 value_contents_copy (struct value *dst, LONGEST dst_offset,
1385 struct value *src, LONGEST src_offset, LONGEST length)
1386 {
1387 if (src->lazy)
1388 value_fetch_lazy (src);
1389
1390 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1391 }
1392
1393 int
1394 value_lazy (const struct value *value)
1395 {
1396 return value->lazy;
1397 }
1398
1399 void
1400 set_value_lazy (struct value *value, int val)
1401 {
1402 value->lazy = val;
1403 }
1404
1405 int
1406 value_stack (const struct value *value)
1407 {
1408 return value->stack;
1409 }
1410
1411 void
1412 set_value_stack (struct value *value, int val)
1413 {
1414 value->stack = val;
1415 }
1416
1417 const gdb_byte *
1418 value_contents (struct value *value)
1419 {
1420 const gdb_byte *result = value_contents_writeable (value);
1421 require_not_optimized_out (value);
1422 require_available (value);
1423 return result;
1424 }
1425
1426 gdb_byte *
1427 value_contents_writeable (struct value *value)
1428 {
1429 if (value->lazy)
1430 value_fetch_lazy (value);
1431 return value_contents_raw (value);
1432 }
1433
1434 int
1435 value_optimized_out (struct value *value)
1436 {
1437 /* We can only know if a value is optimized out once we have tried to
1438 fetch it. */
1439 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1440 {
1441 TRY
1442 {
1443 value_fetch_lazy (value);
1444 }
1445 CATCH (ex, RETURN_MASK_ERROR)
1446 {
1447 /* Fall back to checking value->optimized_out. */
1448 }
1449 END_CATCH
1450 }
1451
1452 return !VEC_empty (range_s, value->optimized_out);
1453 }
1454
1455 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1456 the following LENGTH bytes. */
1457
1458 void
1459 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1460 {
1461 mark_value_bits_optimized_out (value,
1462 offset * TARGET_CHAR_BIT,
1463 length * TARGET_CHAR_BIT);
1464 }
1465
1466 /* See value.h. */
1467
1468 void
1469 mark_value_bits_optimized_out (struct value *value,
1470 LONGEST offset, LONGEST length)
1471 {
1472 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1473 }
1474
1475 int
1476 value_bits_synthetic_pointer (const struct value *value,
1477 LONGEST offset, LONGEST length)
1478 {
1479 if (value->lval != lval_computed
1480 || !value->location.computed.funcs->check_synthetic_pointer)
1481 return 0;
1482 return value->location.computed.funcs->check_synthetic_pointer (value,
1483 offset,
1484 length);
1485 }
1486
1487 LONGEST
1488 value_embedded_offset (const struct value *value)
1489 {
1490 return value->embedded_offset;
1491 }
1492
1493 void
1494 set_value_embedded_offset (struct value *value, LONGEST val)
1495 {
1496 value->embedded_offset = val;
1497 }
1498
1499 LONGEST
1500 value_pointed_to_offset (const struct value *value)
1501 {
1502 return value->pointed_to_offset;
1503 }
1504
1505 void
1506 set_value_pointed_to_offset (struct value *value, LONGEST val)
1507 {
1508 value->pointed_to_offset = val;
1509 }
1510
1511 const struct lval_funcs *
1512 value_computed_funcs (const struct value *v)
1513 {
1514 gdb_assert (value_lval_const (v) == lval_computed);
1515
1516 return v->location.computed.funcs;
1517 }
1518
1519 void *
1520 value_computed_closure (const struct value *v)
1521 {
1522 gdb_assert (v->lval == lval_computed);
1523
1524 return v->location.computed.closure;
1525 }
1526
1527 enum lval_type *
1528 deprecated_value_lval_hack (struct value *value)
1529 {
1530 return &value->lval;
1531 }
1532
1533 enum lval_type
1534 value_lval_const (const struct value *value)
1535 {
1536 return value->lval;
1537 }
1538
1539 CORE_ADDR
1540 value_address (const struct value *value)
1541 {
1542 if (value->lval == lval_internalvar
1543 || value->lval == lval_internalvar_component
1544 || value->lval == lval_xcallable)
1545 return 0;
1546 if (value->parent != NULL)
1547 return value_address (value->parent) + value->offset;
1548 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1549 {
1550 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1551 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1552 }
1553
1554 return value->location.address + value->offset;
1555 }
1556
1557 CORE_ADDR
1558 value_raw_address (const struct value *value)
1559 {
1560 if (value->lval == lval_internalvar
1561 || value->lval == lval_internalvar_component
1562 || value->lval == lval_xcallable)
1563 return 0;
1564 return value->location.address;
1565 }
1566
1567 void
1568 set_value_address (struct value *value, CORE_ADDR addr)
1569 {
1570 gdb_assert (value->lval != lval_internalvar
1571 && value->lval != lval_internalvar_component
1572 && value->lval != lval_xcallable);
1573 value->location.address = addr;
1574 }
1575
1576 struct internalvar **
1577 deprecated_value_internalvar_hack (struct value *value)
1578 {
1579 return &value->location.internalvar;
1580 }
1581
1582 struct frame_id *
1583 deprecated_value_frame_id_hack (struct value *value)
1584 {
1585 return &value->frame_id;
1586 }
1587
1588 short *
1589 deprecated_value_regnum_hack (struct value *value)
1590 {
1591 return &value->regnum;
1592 }
1593
1594 int
1595 deprecated_value_modifiable (const struct value *value)
1596 {
1597 return value->modifiable;
1598 }
1599 \f
1600 /* Return a mark in the value chain. All values allocated after the
1601 mark is obtained (except for those released) are subject to being freed
1602 if a subsequent value_free_to_mark is passed the mark. */
1603 struct value *
1604 value_mark (void)
1605 {
1606 return all_values;
1607 }
1608
1609 /* Take a reference to VAL. VAL will not be deallocated until all
1610 references are released. */
1611
1612 void
1613 value_incref (struct value *val)
1614 {
1615 val->reference_count++;
1616 }
1617
1618 /* Release a reference to VAL, which was acquired with value_incref.
1619 This function is also called to deallocate values from the value
1620 chain. */
1621
1622 void
1623 value_free (struct value *val)
1624 {
1625 if (val)
1626 {
1627 gdb_assert (val->reference_count > 0);
1628 val->reference_count--;
1629 if (val->reference_count > 0)
1630 return;
1631
1632 /* If there's an associated parent value, drop our reference to
1633 it. */
1634 if (val->parent != NULL)
1635 value_free (val->parent);
1636
1637 if (VALUE_LVAL (val) == lval_computed)
1638 {
1639 const struct lval_funcs *funcs = val->location.computed.funcs;
1640
1641 if (funcs->free_closure)
1642 funcs->free_closure (val);
1643 }
1644 else if (VALUE_LVAL (val) == lval_xcallable)
1645 free_xmethod_worker (val->location.xm_worker);
1646
1647 xfree (val->contents);
1648 VEC_free (range_s, val->unavailable);
1649 }
1650 xfree (val);
1651 }
1652
1653 /* Free all values allocated since MARK was obtained by value_mark
1654 (except for those released). */
1655 void
1656 value_free_to_mark (const struct value *mark)
1657 {
1658 struct value *val;
1659 struct value *next;
1660
1661 for (val = all_values; val && val != mark; val = next)
1662 {
1663 next = val->next;
1664 val->released = 1;
1665 value_free (val);
1666 }
1667 all_values = val;
1668 }
1669
1670 /* Free all the values that have been allocated (except for those released).
1671 Call after each command, successful or not.
1672 In practice this is called before each command, which is sufficient. */
1673
1674 void
1675 free_all_values (void)
1676 {
1677 struct value *val;
1678 struct value *next;
1679
1680 for (val = all_values; val; val = next)
1681 {
1682 next = val->next;
1683 val->released = 1;
1684 value_free (val);
1685 }
1686
1687 all_values = 0;
1688 }
1689
1690 /* Frees all the elements in a chain of values. */
1691
1692 void
1693 free_value_chain (struct value *v)
1694 {
1695 struct value *next;
1696
1697 for (; v; v = next)
1698 {
1699 next = value_next (v);
1700 value_free (v);
1701 }
1702 }
1703
1704 /* Remove VAL from the chain all_values
1705 so it will not be freed automatically. */
1706
1707 void
1708 release_value (struct value *val)
1709 {
1710 struct value *v;
1711
1712 if (all_values == val)
1713 {
1714 all_values = val->next;
1715 val->next = NULL;
1716 val->released = 1;
1717 return;
1718 }
1719
1720 for (v = all_values; v; v = v->next)
1721 {
1722 if (v->next == val)
1723 {
1724 v->next = val->next;
1725 val->next = NULL;
1726 val->released = 1;
1727 break;
1728 }
1729 }
1730 }
1731
1732 /* If the value is not already released, release it.
1733 If the value is already released, increment its reference count.
1734 That is, this function ensures that the value is released from the
1735 value chain and that the caller owns a reference to it. */
1736
1737 void
1738 release_value_or_incref (struct value *val)
1739 {
1740 if (val->released)
1741 value_incref (val);
1742 else
1743 release_value (val);
1744 }
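/* Illustrative ownership pattern, using the functions above (a sketch,
   not code from this file):

     struct value *val = allocate_value (type); /* on all_values, refcount 1  */
     release_value (val);                       /* off the chain; caller owns the reference  */
     ... use val across commands ...
     value_free (val);                          /* drop the reference when done  */
*/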
1745
1746 /* Release all values up to MARK. */
1747 struct value *
1748 value_release_to_mark (const struct value *mark)
1749 {
1750 struct value *val;
1751 struct value *next;
1752
1753 for (val = next = all_values; next; next = next->next)
1754 {
1755 if (next->next == mark)
1756 {
1757 all_values = next->next;
1758 next->next = NULL;
1759 return val;
1760 }
1761 next->released = 1;
1762 }
1763 all_values = 0;
1764 return val;
1765 }
1766
1767 /* Return a copy of the value ARG.
1768 It contains the same contents, for the same memory address,
1769 but it's a different block of storage. */
1770
1771 struct value *
1772 value_copy (struct value *arg)
1773 {
1774 struct type *encl_type = value_enclosing_type (arg);
1775 struct value *val;
1776
1777 if (value_lazy (arg))
1778 val = allocate_value_lazy (encl_type);
1779 else
1780 val = allocate_value (encl_type);
1781 val->type = arg->type;
1782 VALUE_LVAL (val) = VALUE_LVAL (arg);
1783 val->location = arg->location;
1784 val->offset = arg->offset;
1785 val->bitpos = arg->bitpos;
1786 val->bitsize = arg->bitsize;
1787 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1788 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1789 val->lazy = arg->lazy;
1790 val->embedded_offset = value_embedded_offset (arg);
1791 val->pointed_to_offset = arg->pointed_to_offset;
1792 val->modifiable = arg->modifiable;
1793 if (!value_lazy (val))
1794 {
1795 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1796 TYPE_LENGTH (value_enclosing_type (arg)));
1797
1798 }
1799 val->unavailable = VEC_copy (range_s, arg->unavailable);
1800 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1801 set_value_parent (val, arg->parent);
1802 if (VALUE_LVAL (val) == lval_computed)
1803 {
1804 const struct lval_funcs *funcs = val->location.computed.funcs;
1805
1806 if (funcs->copy_closure)
1807 val->location.computed.closure = funcs->copy_closure (val);
1808 }
1809 return val;
1810 }
1811
1812 /* Return a "const" and/or "volatile" qualified version of the value V.
1813 If CNST is true, then the returned value will be qualified with
1814 "const".
1815 If VOLTL is true, then the returned value will be qualified with
1816 "volatile". */
1817
1818 struct value *
1819 make_cv_value (int cnst, int voltl, struct value *v)
1820 {
1821 struct type *val_type = value_type (v);
1822 struct type *enclosing_type = value_enclosing_type (v);
1823 struct value *cv_val = value_copy (v);
1824
1825 deprecated_set_value_type (cv_val,
1826 make_cv_type (cnst, voltl, val_type, NULL));
1827 set_value_enclosing_type (cv_val,
1828 make_cv_type (cnst, voltl, enclosing_type, NULL));
1829
1830 return cv_val;
1831 }
1832
1833 /* Return a version of ARG that is non-lvalue. */
1834
1835 struct value *
1836 value_non_lval (struct value *arg)
1837 {
1838 if (VALUE_LVAL (arg) != not_lval)
1839 {
1840 struct type *enc_type = value_enclosing_type (arg);
1841 struct value *val = allocate_value (enc_type);
1842
1843 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1844 TYPE_LENGTH (enc_type));
1845 val->type = arg->type;
1846 set_value_embedded_offset (val, value_embedded_offset (arg));
1847 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1848 return val;
1849 }
1850 return arg;
1851 }
1852
1853 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1854
1855 void
1856 value_force_lval (struct value *v, CORE_ADDR addr)
1857 {
1858 gdb_assert (VALUE_LVAL (v) == not_lval);
1859
1860 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1861 v->lval = lval_memory;
1862 v->location.address = addr;
1863 }
1864
1865 void
1866 set_value_component_location (struct value *component,
1867 const struct value *whole)
1868 {
1869 struct type *type;
1870
1871 gdb_assert (whole->lval != lval_xcallable);
1872
1873 if (whole->lval == lval_internalvar)
1874 VALUE_LVAL (component) = lval_internalvar_component;
1875 else
1876 VALUE_LVAL (component) = whole->lval;
1877
1878 component->location = whole->location;
1879 if (whole->lval == lval_computed)
1880 {
1881 const struct lval_funcs *funcs = whole->location.computed.funcs;
1882
1883 if (funcs->copy_closure)
1884 component->location.computed.closure = funcs->copy_closure (whole);
1885 }
1886
1887 /* If the type has a resolved dynamic location property,
1888 update its value address. */
1889 type = value_type (whole);
1890 if (NULL != TYPE_DATA_LOCATION (type)
1891 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1892 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1893 }
1894
1895 /* Access to the value history. */
1896
1897 /* Record a new value in the value history.
1898 Returns the absolute history index of the entry. */
1899
1900 int
1901 record_latest_value (struct value *val)
1902 {
1903 int i;
1904
1905 /* We don't want this value to have anything to do with the inferior anymore.
1906 In particular, "set $1 = 50" should not affect the variable from which
1907 the value was taken, and fast watchpoints should be able to assume that
1908 a value on the value history never changes. */
1909 if (value_lazy (val))
1910 value_fetch_lazy (val);
1911 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1912 from. This is a bit dubious, because then *&$1 does not just return $1
1913 but the current contents of that location. c'est la vie... */
1914 val->modifiable = 0;
1915
1916 /* The value may have already been released, in which case we're adding a
1917 new reference for its entry in the history. That is why we call
1918 release_value_or_incref here instead of release_value. */
1919 release_value_or_incref (val);
1920
1921 /* Here we treat value_history_count as origin-zero
1922 and as applying to the value being stored now. */
1923
1924 i = value_history_count % VALUE_HISTORY_CHUNK;
1925 if (i == 0)
1926 {
1927 struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);
1928
1929 newobj->next = value_history_chain;
1930 value_history_chain = newobj;
1931 }
1932
1933 value_history_chain->values[i] = val;
1934
1935 /* Now we regard value_history_count as origin-one
1936 and as applying to the value just stored. */
1937
1938 return ++value_history_count;
1939 }
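/* Worked example (illustrative values): with VALUE_HISTORY_CHUNK == 60,
   storing the 61st value finds i == 60 % 60 == 0, so a fresh chunk is
   pushed onto value_history_chain and the value goes into slot 0 of that
   newest chunk; the function returns 61, i.e. the value becomes $61.  */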
1940
1941 /* Return a copy of the value in the history with sequence number NUM. */
1942
1943 struct value *
1944 access_value_history (int num)
1945 {
1946 struct value_history_chunk *chunk;
1947 int i;
1948 int absnum = num;
1949
1950 if (absnum <= 0)
1951 absnum += value_history_count;
1952
1953 if (absnum <= 0)
1954 {
1955 if (num == 0)
1956 error (_("The history is empty."));
1957 else if (num == 1)
1958 error (_("There is only one value in the history."));
1959 else
1960 error (_("History does not go back to $$%d."), -num);
1961 }
1962 if (absnum > value_history_count)
1963 error (_("History has not yet reached $%d."), absnum);
1964
1965 absnum--;
1966
1967 /* Now absnum is always absolute and origin zero. */
1968
1969 chunk = value_history_chain;
1970 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1971 - absnum / VALUE_HISTORY_CHUNK;
1972 i > 0; i--)
1973 chunk = chunk->next;
1974
1975 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1976 }
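
/* Illustrative walk-through: suppose VALUE_HISTORY_CHUNK were 60 and 130
   values had been recorded, so the chain holds three chunks with the
   newest at its head.  Requesting history number 5 gives absnum == 4
   after the decrement above; the loop walks 129 / 60 - 4 / 60 == 2
   chunks down the chain to the oldest chunk, and the value sits in
   slot 4 % 60 == 4.  */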
1977
1978 static void
1979 show_values (char *num_exp, int from_tty)
1980 {
1981 int i;
1982 struct value *val;
1983 static int num = 1;
1984
1985 if (num_exp)
1986 {
1987 /* "show values +" should print from the stored position.
1988 "show values <exp>" should print around value number <exp>. */
1989 if (num_exp[0] != '+' || num_exp[1] != '\0')
1990 num = parse_and_eval_long (num_exp) - 5;
1991 }
1992 else
1993 {
1994 /* "show values" means print the last 10 values. */
1995 num = value_history_count - 9;
1996 }
1997
1998 if (num <= 0)
1999 num = 1;
2000
2001 for (i = num; i < num + 10 && i <= value_history_count; i++)
2002 {
2003 struct value_print_options opts;
2004
2005 val = access_value_history (i);
2006 printf_filtered (("$%d = "), i);
2007 get_user_print_options (&opts);
2008 value_print (val, gdb_stdout, &opts);
2009 printf_filtered (("\n"));
2010 }
2011
2012 /* The next "show values +" should start after what we just printed. */
2013 num += 10;
2014
2015 /* Hitting just return after this command should do the same thing as
2016 "show values +". If num_exp is null, this is unnecessary, since
2017 "show values +" is not useful after "show values". */
2018 if (from_tty && num_exp)
2019 {
2020 num_exp[0] = '+';
2021 num_exp[1] = '\0';
2022 }
2023 }
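
/* Example of the resulting CLI behavior (a sketch; actual output depends
   on what the history contains):

     (gdb) show values      - prints the last ten values in the history
     (gdb) show values 25   - prints the ten values around $25 ($20..$29)
     (gdb) show values +    - continues where the previous command stopped  */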
2024 \f
2025 enum internalvar_kind
2026 {
2027 /* The internal variable is empty. */
2028 INTERNALVAR_VOID,
2029
2030 /* The value of the internal variable is provided directly as
2031 a GDB value object. */
2032 INTERNALVAR_VALUE,
2033
2034 /* A fresh value is computed via a call-back routine on every
2035 access to the internal variable. */
2036 INTERNALVAR_MAKE_VALUE,
2037
2038 /* The internal variable holds a GDB internal convenience function. */
2039 INTERNALVAR_FUNCTION,
2040
2041 /* The variable holds an integer value. */
2042 INTERNALVAR_INTEGER,
2043
2044 /* The variable holds a GDB-provided string. */
2045 INTERNALVAR_STRING,
2046 };
2047
2048 union internalvar_data
2049 {
2050 /* A value object used with INTERNALVAR_VALUE. */
2051 struct value *value;
2052
2053 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
2054 struct
2055 {
2056 /* The functions to call. */
2057 const struct internalvar_funcs *functions;
2058
2059 /* The function's user-data. */
2060 void *data;
2061 } make_value;
2062
2063 /* The internal function used with INTERNALVAR_FUNCTION. */
2064 struct
2065 {
2066 struct internal_function *function;
2067 /* True if this is the canonical name for the function. */
2068 int canonical;
2069 } fn;
2070
2071 /* An integer value used with INTERNALVAR_INTEGER. */
2072 struct
2073 {
2074 /* If type is non-NULL, it will be used as the type to generate
2075 a value for this internal variable. If type is NULL, a default
2076 integer type for the architecture is used. */
2077 struct type *type;
2078 LONGEST val;
2079 } integer;
2080
2081 /* A string value used with INTERNALVAR_STRING. */
2082 char *string;
2083 };
2084
2085 /* Internal variables. These are variables within the debugger
2086 that hold values assigned by debugger commands.
2087 The user refers to them with a '$' prefix
2088 that does not appear in the variable names stored internally. */
2089
2090 struct internalvar
2091 {
2092 struct internalvar *next;
2093 char *name;
2094
2095 /* We support various different kinds of content of an internal variable.
2096 enum internalvar_kind specifies the kind, and union internalvar_data
2097 provides the data associated with this particular kind. */
2098
2099 enum internalvar_kind kind;
2100
2101 union internalvar_data u;
2102 };
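
/* For example: "set $foo = 5" ends up in set_internalvar below and leaves
   $foo as an INTERNALVAR_VALUE holding a copy of the integer; a name that
   is looked up but never assigned stays INTERNALVAR_VOID; convenience
   functions registered via add_internal_function are INTERNALVAR_FUNCTION;
   and variables registered with create_internalvar_type_lazy are
   INTERNALVAR_MAKE_VALUE, recomputed on every access.  */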
2103
2104 static struct internalvar *internalvars;
2105
2106 /* If the variable does not already have a value, evaluate the given
2107 assignment to create it; otherwise leave it unchanged. */
2108 static void
2109 init_if_undefined_command (char *args, int from_tty)
2110 {
2111 struct internalvar *intvar;
2112
2113 /* Parse the expression - this is taken from set_command(). */
2114 struct expression *expr = parse_expression (args);
2115 struct cleanup *old_chain =
2116 make_cleanup (free_current_contents, &expr);
2117
2118 /* Validate the expression.
2119 Was the expression an assignment?
2120 Or even an expression at all? */
2121 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2122 error (_("Init-if-undefined requires an assignment expression."));
2123
2124 /* Extract the variable from the parsed expression.
2125 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2126 if (expr->elts[1].opcode != OP_INTERNALVAR)
2127 error (_("The first parameter to init-if-undefined "
2128 "should be a GDB variable."));
2129 intvar = expr->elts[2].internalvar;
2130
2131 /* Only evaluate the expression if the lvalue is void.
2132 This may still fail if the expression is invalid. */
2133 if (intvar->kind == INTERNALVAR_VOID)
2134 evaluate_expression (expr);
2135
2136 do_cleanups (old_chain);
2137 }
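
/* Typical use, with a made-up convenience variable, from a user script or
   a breakpoint command list:

     (gdb) init-if-undefined $hitcount = 0

   Re-sourcing the script leaves an already-initialized $hitcount alone,
   since the assignment is only evaluated while the variable is void.  */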
2138
2139
2140 /* Look up an internal variable with name NAME. NAME should not
2141 normally include a dollar sign.
2142
2143 If the specified internal variable does not exist,
2144 the return value is NULL. */
2145
2146 struct internalvar *
2147 lookup_only_internalvar (const char *name)
2148 {
2149 struct internalvar *var;
2150
2151 for (var = internalvars; var; var = var->next)
2152 if (strcmp (var->name, name) == 0)
2153 return var;
2154
2155 return NULL;
2156 }
2157
2158 /* Complete NAME by comparing it to the names of internal variables.
2159 Returns a vector of newly allocated strings, or NULL if no matches
2160 were found. */
2161
2162 VEC (char_ptr) *
2163 complete_internalvar (const char *name)
2164 {
2165 VEC (char_ptr) *result = NULL;
2166 struct internalvar *var;
2167 int len;
2168
2169 len = strlen (name);
2170
2171 for (var = internalvars; var; var = var->next)
2172 if (strncmp (var->name, name, len) == 0)
2173 {
2174 char *r = xstrdup (var->name);
2175
2176 VEC_safe_push (char_ptr, result, r);
2177 }
2178
2179 return result;
2180 }
2181
2182 /* Create an internal variable with name NAME and with a void value.
2183 NAME should not normally include a dollar sign. */
2184
2185 struct internalvar *
2186 create_internalvar (const char *name)
2187 {
2188 struct internalvar *var = XNEW (struct internalvar);
2189
2190 var->name = concat (name, (char *)NULL);
2191 var->kind = INTERNALVAR_VOID;
2192 var->next = internalvars;
2193 internalvars = var;
2194 return var;
2195 }
2196
2197 /* Create an internal variable with name NAME and register FUN as the
2198 function that value_of_internalvar uses to create a value whenever
2199 this variable is referenced. NAME should not normally include a
2200 dollar sign. DATA is passed uninterpreted to FUN when it is
2201 called. CLEANUP, if not NULL, is called when the internal variable
2202 is destroyed. It is passed DATA as its only argument. */
2203
2204 struct internalvar *
2205 create_internalvar_type_lazy (const char *name,
2206 const struct internalvar_funcs *funcs,
2207 void *data)
2208 {
2209 struct internalvar *var = create_internalvar (name);
2210
2211 var->kind = INTERNALVAR_MAKE_VALUE;
2212 var->u.make_value.functions = funcs;
2213 var->u.make_value.data = data;
2214 return var;
2215 }
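
/* A minimal usage sketch; the "$_lastbp" variable, its make_lastbp_value
   callback and the last_breakpoint_number helper are all hypothetical:

     static struct value *
     make_lastbp_value (struct gdbarch *gdbarch, struct internalvar *var,
                        void *ignore)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  last_breakpoint_number ());
     }

     static const struct internalvar_funcs lastbp_funcs =
       { make_lastbp_value, NULL, NULL };

     create_internalvar_type_lazy ("_lastbp", &lastbp_funcs, NULL);

   The two NULLs stand for the optional compile_to_ax and destroy
   callbacks.  Every evaluation of "$_lastbp" then calls make_lastbp_value
   afresh, as described for INTERNALVAR_MAKE_VALUE above.  */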
2216
2217 /* See documentation in value.h. */
2218
2219 int
2220 compile_internalvar_to_ax (struct internalvar *var,
2221 struct agent_expr *expr,
2222 struct axs_value *value)
2223 {
2224 if (var->kind != INTERNALVAR_MAKE_VALUE
2225 || var->u.make_value.functions->compile_to_ax == NULL)
2226 return 0;
2227
2228 var->u.make_value.functions->compile_to_ax (var, expr, value,
2229 var->u.make_value.data);
2230 return 1;
2231 }
2232
2233 /* Look up an internal variable with name NAME. NAME should not
2234 normally include a dollar sign.
2235
2236 If the specified internal variable does not exist,
2237 one is created, with a void value. */
2238
2239 struct internalvar *
2240 lookup_internalvar (const char *name)
2241 {
2242 struct internalvar *var;
2243
2244 var = lookup_only_internalvar (name);
2245 if (var)
2246 return var;
2247
2248 return create_internalvar (name);
2249 }
2250
2251 /* Return current value of internal variable VAR. For variables that
2252 are not inherently typed, use a value type appropriate for GDBARCH. */
2253
2254 struct value *
2255 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2256 {
2257 struct value *val;
2258 struct trace_state_variable *tsv;
2259
2260 /* If there is a trace state variable of the same name, assume that
2261 is what we really want to see. */
2262 tsv = find_trace_state_variable (var->name);
2263 if (tsv)
2264 {
2265 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2266 &(tsv->value));
2267 if (tsv->value_known)
2268 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2269 tsv->value);
2270 else
2271 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2272 return val;
2273 }
2274
2275 switch (var->kind)
2276 {
2277 case INTERNALVAR_VOID:
2278 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2279 break;
2280
2281 case INTERNALVAR_FUNCTION:
2282 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2283 break;
2284
2285 case INTERNALVAR_INTEGER:
2286 if (!var->u.integer.type)
2287 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2288 var->u.integer.val);
2289 else
2290 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2291 break;
2292
2293 case INTERNALVAR_STRING:
2294 val = value_cstring (var->u.string, strlen (var->u.string),
2295 builtin_type (gdbarch)->builtin_char);
2296 break;
2297
2298 case INTERNALVAR_VALUE:
2299 val = value_copy (var->u.value);
2300 if (value_lazy (val))
2301 value_fetch_lazy (val);
2302 break;
2303
2304 case INTERNALVAR_MAKE_VALUE:
2305 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2306 var->u.make_value.data);
2307 break;
2308
2309 default:
2310 internal_error (__FILE__, __LINE__, _("bad kind"));
2311 }
2312
2313 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2314 on this value go back to affect the original internal variable.
2315
2316 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2317 no underlying modifiable state in the internal variable.
2318
2319 Likewise, if the variable's value is a computed lvalue, we want
2320 references to it to produce another computed lvalue, where
2321 references and assignments actually operate through the
2322 computed value's functions.
2323
2324 This means that internal variables with computed values
2325 behave a little differently from other internal variables:
2326 assignments to them don't just replace the previous value
2327 altogether. At the moment, this seems like the behavior we
2328 want. */
2329
2330 if (var->kind != INTERNALVAR_MAKE_VALUE
2331 && val->lval != lval_computed)
2332 {
2333 VALUE_LVAL (val) = lval_internalvar;
2334 VALUE_INTERNALVAR (val) = var;
2335 }
2336
2337 return val;
2338 }
2339
2340 int
2341 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2342 {
2343 if (var->kind == INTERNALVAR_INTEGER)
2344 {
2345 *result = var->u.integer.val;
2346 return 1;
2347 }
2348
2349 if (var->kind == INTERNALVAR_VALUE)
2350 {
2351 struct type *type = check_typedef (value_type (var->u.value));
2352
2353 if (TYPE_CODE (type) == TYPE_CODE_INT)
2354 {
2355 *result = value_as_long (var->u.value);
2356 return 1;
2357 }
2358 }
2359
2360 return 0;
2361 }
2362
2363 static int
2364 get_internalvar_function (struct internalvar *var,
2365 struct internal_function **result)
2366 {
2367 switch (var->kind)
2368 {
2369 case INTERNALVAR_FUNCTION:
2370 *result = var->u.fn.function;
2371 return 1;
2372
2373 default:
2374 return 0;
2375 }
2376 }
2377
2378 void
2379 set_internalvar_component (struct internalvar *var,
2380 LONGEST offset, LONGEST bitpos,
2381 LONGEST bitsize, struct value *newval)
2382 {
2383 gdb_byte *addr;
2384 struct gdbarch *arch;
2385 int unit_size;
2386
2387 switch (var->kind)
2388 {
2389 case INTERNALVAR_VALUE:
2390 addr = value_contents_writeable (var->u.value);
2391 arch = get_value_arch (var->u.value);
2392 unit_size = gdbarch_addressable_memory_unit_size (arch);
2393
2394 if (bitsize)
2395 modify_field (value_type (var->u.value), addr + offset,
2396 value_as_long (newval), bitpos, bitsize);
2397 else
2398 memcpy (addr + offset * unit_size, value_contents (newval),
2399 TYPE_LENGTH (value_type (newval)));
2400 break;
2401
2402 default:
2403 /* We can never get a component of any other kind. */
2404 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2405 }
2406 }
2407
2408 void
2409 set_internalvar (struct internalvar *var, struct value *val)
2410 {
2411 enum internalvar_kind new_kind;
2412 union internalvar_data new_data = { 0 };
2413
2414 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2415 error (_("Cannot overwrite convenience function %s"), var->name);
2416
2417 /* Prepare new contents. */
2418 switch (TYPE_CODE (check_typedef (value_type (val))))
2419 {
2420 case TYPE_CODE_VOID:
2421 new_kind = INTERNALVAR_VOID;
2422 break;
2423
2424 case TYPE_CODE_INTERNAL_FUNCTION:
2425 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2426 new_kind = INTERNALVAR_FUNCTION;
2427 get_internalvar_function (VALUE_INTERNALVAR (val),
2428 &new_data.fn.function);
2429 /* Copies created here are never canonical. */
2430 break;
2431
2432 default:
2433 new_kind = INTERNALVAR_VALUE;
2434 new_data.value = value_copy (val);
2435 new_data.value->modifiable = 1;
2436
2437 /* Force the value to be fetched from the target now, to avoid problems
2438 later when this internalvar is referenced and the target is gone or
2439 has changed. */
2440 if (value_lazy (new_data.value))
2441 value_fetch_lazy (new_data.value);
2442
2443 /* Release the value from the value chain to prevent it from being
2444 deleted by free_all_values. From here on this function should not
2445 call error () until new_data is installed into the var->u to avoid
2446 leaking memory. */
2447 release_value (new_data.value);
2448
2449 /* Internal variables which are created from values with a dynamic
2450 location don't need the location property of the original value anymore.
2451 The resolved dynamic location is used in preference to any other address
2452 when accessing the value.
2453 If we kept the property, we would still refer to the original value.
2454 Remove the location property if it exists. */
2455 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2456
2457 break;
2458 }
2459
2460 /* Clean up old contents. */
2461 clear_internalvar (var);
2462
2463 /* Switch over. */
2464 var->kind = new_kind;
2465 var->u = new_data;
2466 /* End code which must not call error(). */
2467 }
2468
2469 void
2470 set_internalvar_integer (struct internalvar *var, LONGEST l)
2471 {
2472 /* Clean up old contents. */
2473 clear_internalvar (var);
2474
2475 var->kind = INTERNALVAR_INTEGER;
2476 var->u.integer.type = NULL;
2477 var->u.integer.val = l;
2478 }
2479
2480 void
2481 set_internalvar_string (struct internalvar *var, const char *string)
2482 {
2483 /* Clean up old contents. */
2484 clear_internalvar (var);
2485
2486 var->kind = INTERNALVAR_STRING;
2487 var->u.string = xstrdup (string);
2488 }
2489
2490 static void
2491 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2492 {
2493 /* Clean up old contents. */
2494 clear_internalvar (var);
2495
2496 var->kind = INTERNALVAR_FUNCTION;
2497 var->u.fn.function = f;
2498 var->u.fn.canonical = 1;
2499 /* Variables installed here are always the canonical version. */
2500 }
2501
2502 void
2503 clear_internalvar (struct internalvar *var)
2504 {
2505 /* Clean up old contents. */
2506 switch (var->kind)
2507 {
2508 case INTERNALVAR_VALUE:
2509 value_free (var->u.value);
2510 break;
2511
2512 case INTERNALVAR_STRING:
2513 xfree (var->u.string);
2514 break;
2515
2516 case INTERNALVAR_MAKE_VALUE:
2517 if (var->u.make_value.functions->destroy != NULL)
2518 var->u.make_value.functions->destroy (var->u.make_value.data);
2519 break;
2520
2521 default:
2522 break;
2523 }
2524
2525 /* Reset to void kind. */
2526 var->kind = INTERNALVAR_VOID;
2527 }
2528
2529 char *
2530 internalvar_name (const struct internalvar *var)
2531 {
2532 return var->name;
2533 }
2534
2535 static struct internal_function *
2536 create_internal_function (const char *name,
2537 internal_function_fn handler, void *cookie)
2538 {
2539 struct internal_function *ifn = XNEW (struct internal_function);
2540
2541 ifn->name = xstrdup (name);
2542 ifn->handler = handler;
2543 ifn->cookie = cookie;
2544 return ifn;
2545 }
2546
2547 char *
2548 value_internal_function_name (struct value *val)
2549 {
2550 struct internal_function *ifn;
2551 int result;
2552
2553 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2554 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2555 gdb_assert (result);
2556
2557 return ifn->name;
2558 }
2559
2560 struct value *
2561 call_internal_function (struct gdbarch *gdbarch,
2562 const struct language_defn *language,
2563 struct value *func, int argc, struct value **argv)
2564 {
2565 struct internal_function *ifn;
2566 int result;
2567
2568 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2569 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2570 gdb_assert (result);
2571
2572 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2573 }
2574
2575 /* The 'function' command. This does nothing -- it is just a
2576 placeholder to let "help function NAME" work. This is also used as
2577 the implementation of the sub-command that is created when
2578 registering an internal function. */
2579 static void
2580 function_command (char *command, int from_tty)
2581 {
2582 /* Do nothing. */
2583 }
2584
2585 /* Clean up if an internal function's command is destroyed. */
2586 static void
2587 function_destroyer (struct cmd_list_element *self, void *ignore)
2588 {
2589 xfree ((char *) self->name);
2590 xfree ((char *) self->doc);
2591 }
2592
2593 /* Add a new internal function. NAME is the name of the function; DOC
2594 is a documentation string describing the function. HANDLER is
2595 called when the function is invoked. COOKIE is an arbitrary
2596 pointer which is passed to HANDLER and is intended for "user
2597 data". */
2598 void
2599 add_internal_function (const char *name, const char *doc,
2600 internal_function_fn handler, void *cookie)
2601 {
2602 struct cmd_list_element *cmd;
2603 struct internal_function *ifn;
2604 struct internalvar *var = lookup_internalvar (name);
2605
2606 ifn = create_internal_function (name, handler, cookie);
2607 set_internalvar_function (var, ifn);
2608
2609 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2610 &functionlist);
2611 cmd->destroyer = function_destroyer;
2612 }
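
/* A usage sketch; the "$_argcount" function and its argcount_handler are
   hypothetical:

     static struct value *
     argcount_handler (struct gdbarch *gdbarch,
                       const struct language_defn *language,
                       void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
     }

     add_internal_function ("_argcount", _("Return the number of arguments."),
                            argcount_handler, NULL);

   Evaluating "$_argcount (1, 2, 3)" then reaches argcount_handler through
   call_internal_function above and yields 3, and "help function _argcount"
   prints the documentation string.  */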
2613
2614 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2615 prevent cycles / duplicates. */
2616
2617 void
2618 preserve_one_value (struct value *value, struct objfile *objfile,
2619 htab_t copied_types)
2620 {
2621 if (TYPE_OBJFILE (value->type) == objfile)
2622 value->type = copy_type_recursive (objfile, value->type, copied_types);
2623
2624 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2625 value->enclosing_type = copy_type_recursive (objfile,
2626 value->enclosing_type,
2627 copied_types);
2628 }
2629
2630 /* Likewise for internal variable VAR. */
2631
2632 static void
2633 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2634 htab_t copied_types)
2635 {
2636 switch (var->kind)
2637 {
2638 case INTERNALVAR_INTEGER:
2639 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2640 var->u.integer.type
2641 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2642 break;
2643
2644 case INTERNALVAR_VALUE:
2645 preserve_one_value (var->u.value, objfile, copied_types);
2646 break;
2647 }
2648 }
2649
2650 /* Update the internal variables and value history when OBJFILE is
2651 discarded; we must copy the types out of the objfile. New global types
2652 will be created for every convenience variable which currently points to
2653 this objfile's types, and the convenience variables will be adjusted to
2654 use the new global types. */
2655
2656 void
2657 preserve_values (struct objfile *objfile)
2658 {
2659 htab_t copied_types;
2660 struct value_history_chunk *cur;
2661 struct internalvar *var;
2662 int i;
2663
2664 /* Create the hash table. We allocate on the objfile's obstack, since
2665 it is soon to be deleted. */
2666 copied_types = create_copied_types_hash (objfile);
2667
2668 for (cur = value_history_chain; cur; cur = cur->next)
2669 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2670 if (cur->values[i])
2671 preserve_one_value (cur->values[i], objfile, copied_types);
2672
2673 for (var = internalvars; var; var = var->next)
2674 preserve_one_internalvar (var, objfile, copied_types);
2675
2676 preserve_ext_lang_values (objfile, copied_types);
2677
2678 htab_delete (copied_types);
2679 }
2680
2681 static void
2682 show_convenience (char *ignore, int from_tty)
2683 {
2684 struct gdbarch *gdbarch = get_current_arch ();
2685 struct internalvar *var;
2686 int varseen = 0;
2687 struct value_print_options opts;
2688
2689 get_user_print_options (&opts);
2690 for (var = internalvars; var; var = var->next)
2691 {
2692
2693 if (!varseen)
2694 {
2695 varseen = 1;
2696 }
2697 printf_filtered (("$%s = "), var->name);
2698
2699 TRY
2700 {
2701 struct value *val;
2702
2703 val = value_of_internalvar (gdbarch, var);
2704 value_print (val, gdb_stdout, &opts);
2705 }
2706 CATCH (ex, RETURN_MASK_ERROR)
2707 {
2708 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2709 }
2710 END_CATCH
2711
2712 printf_filtered (("\n"));
2713 }
2714 if (!varseen)
2715 {
2716 /* This text does not mention convenience functions on purpose.
2717 The user can't create them except via Python, and if Python support
2718 is installed this message will never be printed ($_streq will
2719 exist). */
2720 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2721 "Convenience variables have "
2722 "names starting with \"$\";\n"
2723 "use \"set\" as in \"set "
2724 "$foo = 5\" to define them.\n"));
2725 }
2726 }
2727 \f
2728 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2729
2730 struct value *
2731 value_of_xmethod (struct xmethod_worker *worker)
2732 {
2733 if (worker->value == NULL)
2734 {
2735 struct value *v;
2736
2737 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2738 v->lval = lval_xcallable;
2739 v->location.xm_worker = worker;
2740 v->modifiable = 0;
2741 worker->value = v;
2742 }
2743
2744 return worker->value;
2745 }
2746
2747 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2748
2749 struct type *
2750 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2751 {
2752 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2753 && method->lval == lval_xcallable && argc > 0);
2754
2755 return get_xmethod_result_type (method->location.xm_worker,
2756 argv[0], argv + 1, argc - 1);
2757 }
2758
2759 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2760
2761 struct value *
2762 call_xmethod (struct value *method, int argc, struct value **argv)
2763 {
2764 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2765 && method->lval == lval_xcallable && argc > 0);
2766
2767 return invoke_xmethod (method->location.xm_worker,
2768 argv[0], argv + 1, argc - 1);
2769 }
2770 \f
2771 /* Extract a value as a C number (either long or double).
2772 Knows how to convert fixed values to double, or
2773 floating values to long.
2774 Does not deallocate the value. */
2775
2776 LONGEST
2777 value_as_long (struct value *val)
2778 {
2779 /* This coerces arrays and functions, which is necessary (e.g.
2780 in disassemble_command). It also dereferences references, which
2781 I suspect is the most logical thing to do. */
2782 val = coerce_array (val);
2783 return unpack_long (value_type (val), value_contents (val));
2784 }
2785
2786 DOUBLEST
2787 value_as_double (struct value *val)
2788 {
2789 DOUBLEST foo;
2790 int inv;
2791
2792 foo = unpack_double (value_type (val), value_contents (val), &inv);
2793 if (inv)
2794 error (_("Invalid floating value found in program."));
2795 return foo;
2796 }
2797
2798 /* Extract a value as a C pointer. Does not deallocate the value.
2799 Note that val's type may not actually be a pointer; value_as_long
2800 handles all the cases. */
2801 CORE_ADDR
2802 value_as_address (struct value *val)
2803 {
2804 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2805
2806 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2807 whether we want this to be true eventually. */
2808 #if 0
2809 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2810 non-address (e.g. argument to "signal", "info break", etc.), or
2811 for pointers to char, in which the low bits *are* significant. */
2812 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2813 #else
2814
2815 /* There are several targets (IA-64, PowerPC, and others) which
2816 don't represent pointers to functions as simply the address of
2817 the function's entry point. For example, on the IA-64, a
2818 function pointer points to a two-word descriptor, generated by
2819 the linker, which contains the function's entry point, and the
2820 value the IA-64 "global pointer" register should have --- to
2821 support position-independent code. The linker generates
2822 descriptors only for those functions whose addresses are taken.
2823
2824 On such targets, it's difficult for GDB to convert an arbitrary
2825 function address into a function pointer; it has to either find
2826 an existing descriptor for that function, or call malloc and
2827 build its own. On some targets, it is impossible for GDB to
2828 build a descriptor at all: the descriptor must contain a jump
2829 instruction; data memory cannot be executed; and code memory
2830 cannot be modified.
2831
2832 Upon entry to this function, if VAL is a value of type `function'
2833 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2834 value_address (val) is the address of the function. This is what
2835 you'll get if you evaluate an expression like `main'. The call
2836 to COERCE_ARRAY below actually does all the usual unary
2837 conversions, which includes converting values of type `function'
2838 to `pointer to function'. This is the challenging conversion
2839 discussed above. Then, `unpack_long' will convert that pointer
2840 back into an address.
2841
2842 So, suppose the user types `disassemble foo' on an architecture
2843 with a strange function pointer representation, on which GDB
2844 cannot build its own descriptors, and suppose further that `foo'
2845 has no linker-built descriptor. The address->pointer conversion
2846 will signal an error and prevent the command from running, even
2847 though the next step would have been to convert the pointer
2848 directly back into the same address.
2849
2850 The following shortcut avoids this whole mess. If VAL is a
2851 function, just return its address directly. */
2852 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2853 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2854 return value_address (val);
2855
2856 val = coerce_array (val);
2857
2858 /* Some architectures (e.g. Harvard) map instruction and data
2859 addresses onto a single large unified address space. For
2860 instance, an architecture may consider a large integer in the
2861 range 0x10000000 .. 0x1000ffff to already represent a data
2862 address (hence not need a pointer-to-address conversion), while
2863 a small integer would still need to be converted from integer to
2864 pointer to address. Just assume such architectures handle all
2865 integer conversions in a single function. */
2866
2867 /* JimB writes:
2868
2869 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2870 must admonish GDB hackers to make sure its behavior matches the
2871 compiler's, whenever possible.
2872
2873 In general, I think GDB should evaluate expressions the same way
2874 the compiler does. When the user copies an expression out of
2875 their source code and hands it to a `print' command, they should
2876 get the same value the compiler would have computed. Any
2877 deviation from this rule can cause major confusion and annoyance,
2878 and needs to be justified carefully. In other words, GDB doesn't
2879 really have the freedom to do these conversions in clever and
2880 useful ways.
2881
2882 AndrewC pointed out that users aren't complaining about how GDB
2883 casts integers to pointers; they are complaining that they can't
2884 take an address from a disassembly listing and give it to `x/i'.
2885 This is certainly important.
2886
2887 Adding an architecture method like integer_to_address() certainly
2888 makes it possible for GDB to "get it right" in all circumstances
2889 --- the target has complete control over how things get done, so
2890 people can Do The Right Thing for their target without breaking
2891 anyone else. The standard doesn't specify how integers get
2892 converted to pointers; usually, the ABI doesn't either, but
2893 ABI-specific code is a more reasonable place to handle it. */
2894
2895 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2896 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2897 && gdbarch_integer_to_address_p (gdbarch))
2898 return gdbarch_integer_to_address (gdbarch, value_type (val),
2899 value_contents (val));
2900
2901 return unpack_long (value_type (val), value_contents (val));
2902 #endif
2903 }
2904 \f
2905 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2906 as a long, or as a double, assuming the raw data is described
2907 by type TYPE. Knows how to convert different sizes of values
2908 and can convert between fixed and floating point. We don't assume
2909 any alignment for the raw data. Return value is in host byte order.
2910
2911 If you want functions and arrays to be coerced to pointers, and
2912 references to be dereferenced, call value_as_long() instead.
2913
2914 C++: It is assumed that the front-end has taken care of
2915 all matters concerning pointers to members. A pointer
2916 to member which reaches here is considered to be equivalent
2917 to an INT (of some size). After all, it is only an offset. */
2918
2919 LONGEST
2920 unpack_long (struct type *type, const gdb_byte *valaddr)
2921 {
2922 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2923 enum type_code code = TYPE_CODE (type);
2924 int len = TYPE_LENGTH (type);
2925 int nosign = TYPE_UNSIGNED (type);
2926
2927 switch (code)
2928 {
2929 case TYPE_CODE_TYPEDEF:
2930 return unpack_long (check_typedef (type), valaddr);
2931 case TYPE_CODE_ENUM:
2932 case TYPE_CODE_FLAGS:
2933 case TYPE_CODE_BOOL:
2934 case TYPE_CODE_INT:
2935 case TYPE_CODE_CHAR:
2936 case TYPE_CODE_RANGE:
2937 case TYPE_CODE_MEMBERPTR:
2938 if (nosign)
2939 return extract_unsigned_integer (valaddr, len, byte_order);
2940 else
2941 return extract_signed_integer (valaddr, len, byte_order);
2942
2943 case TYPE_CODE_FLT:
2944 return (LONGEST) extract_typed_floating (valaddr, type);
2945
2946 case TYPE_CODE_DECFLOAT:
2947 /* libdecnumber has a function to convert from decimal to integer, but
2948 it doesn't work when the decimal number has a fractional part. */
2949 return (LONGEST) decimal_to_doublest (valaddr, len, byte_order);
2950
2951 case TYPE_CODE_PTR:
2952 case TYPE_CODE_REF:
2953 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2954 whether we want this to be true eventually. */
2955 return extract_typed_address (valaddr, type);
2956
2957 default:
2958 error (_("Value can't be converted to integer."));
2959 }
2960 return 0; /* Placate lint. */
2961 }
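
/* For example: with a 4-byte unsigned integer type on a little-endian
   target, the raw bytes { 0x2a, 0x00, 0x00, 0x00 } unpack to 42, while
   on a big-endian target the same bytes unpack to 0x2a000000.  A pointer
   or reference type instead goes through extract_typed_address above.  */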
2962
2963 /* Return a double value from the specified type and address.
2964 INVP points to an int which is set to 0 for valid value,
2965 1 for invalid value (bad float format). In either case,
2966 the returned double is OK to use. Argument is in target
2967 format, result is in host format. */
2968
2969 DOUBLEST
2970 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2971 {
2972 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2973 enum type_code code;
2974 int len;
2975 int nosign;
2976
2977 *invp = 0; /* Assume valid. */
2978 type = check_typedef (type);
2979 code = TYPE_CODE (type);
2980 len = TYPE_LENGTH (type);
2981 nosign = TYPE_UNSIGNED (type);
2982 if (code == TYPE_CODE_FLT)
2983 {
2984 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2985 floating-point value was valid (using the macro
2986 INVALID_FLOAT). That test/macro have been removed.
2987
2988 It turns out that only the VAX defined this macro and then
2989 only in a non-portable way. Fixing the portability problem
2990 wouldn't help since the VAX floating-point code is also badly
2991 bit-rotten. The target needs to add definitions for the
2992 methods gdbarch_float_format and gdbarch_double_format - these
2993 exactly describe the target floating-point format. The
2994 problem here is that the corresponding floatformat_vax_f and
2995 floatformat_vax_d values these methods should be set to are
2996 also not defined either. Oops!
2997
2998 Hopefully someone will add both the missing floatformat
2999 definitions and the new cases for floatformat_is_valid (). */
3000
3001 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
3002 {
3003 *invp = 1;
3004 return 0.0;
3005 }
3006
3007 return extract_typed_floating (valaddr, type);
3008 }
3009 else if (code == TYPE_CODE_DECFLOAT)
3010 return decimal_to_doublest (valaddr, len, byte_order);
3011 else if (nosign)
3012 {
3013 /* Unsigned -- be sure we compensate for signed LONGEST. */
3014 return (ULONGEST) unpack_long (type, valaddr);
3015 }
3016 else
3017 {
3018 /* Signed -- we are OK with unpack_long. */
3019 return unpack_long (type, valaddr);
3020 }
3021 }
3022
3023 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
3024 as a CORE_ADDR, assuming the raw data is described by type TYPE.
3025 We don't assume any alignment for the raw data. Return value is in
3026 host byte order.
3027
3028 If you want functions and arrays to be coerced to pointers, and
3029 references to be dereferenced, call value_as_address() instead.
3030
3031 C++: It is assumed that the front-end has taken care of
3032 all matters concerning pointers to members. A pointer
3033 to member which reaches here is considered to be equivalent
3034 to an INT (of some size). After all, it is only an offset. */
3035
3036 CORE_ADDR
3037 unpack_pointer (struct type *type, const gdb_byte *valaddr)
3038 {
3039 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
3040 whether we want this to be true eventually. */
3041 return unpack_long (type, valaddr);
3042 }
3043
3044 \f
3045 /* Get the value of the FIELDNO'th field (which must be static) of
3046 TYPE. */
3047
3048 struct value *
3049 value_static_field (struct type *type, int fieldno)
3050 {
3051 struct value *retval;
3052
3053 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
3054 {
3055 case FIELD_LOC_KIND_PHYSADDR:
3056 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3057 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
3058 break;
3059 case FIELD_LOC_KIND_PHYSNAME:
3060 {
3061 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
3062 /* TYPE_FIELD_NAME (type, fieldno); */
3063 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
3064
3065 if (sym.symbol == NULL)
3066 {
3067 /* With some compilers, e.g. HP aCC, static data members are
3068 reported as non-debuggable symbols. */
3069 struct bound_minimal_symbol msym
3070 = lookup_minimal_symbol (phys_name, NULL, NULL);
3071
3072 if (!msym.minsym)
3073 return allocate_optimized_out_value (type);
3074 else
3075 {
3076 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
3077 BMSYMBOL_VALUE_ADDRESS (msym));
3078 }
3079 }
3080 else
3081 retval = value_of_variable (sym.symbol, sym.block);
3082 break;
3083 }
3084 default:
3085 gdb_assert_not_reached ("unexpected field location kind");
3086 }
3087
3088 return retval;
3089 }
3090
3091 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3092 You have to be careful here, since the size of the data area for the value
3093 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3094 than the old enclosing type, you have to allocate more space for the
3095 data. */
3096
3097 void
3098 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3099 {
3100 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3101 {
3102 check_type_length_before_alloc (new_encl_type);
3103 val->contents
3104 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
3105 }
3106
3107 val->enclosing_type = new_encl_type;
3108 }
3109
3110 /* Given a value ARG1 (offset by OFFSET bytes)
3111 of a struct or union type ARG_TYPE,
3112 extract and return the value of one of its (non-static) fields.
3113 FIELDNO says which field. */
3114
3115 struct value *
3116 value_primitive_field (struct value *arg1, LONGEST offset,
3117 int fieldno, struct type *arg_type)
3118 {
3119 struct value *v;
3120 struct type *type;
3121 struct gdbarch *arch = get_value_arch (arg1);
3122 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3123
3124 arg_type = check_typedef (arg_type);
3125 type = TYPE_FIELD_TYPE (arg_type, fieldno);
3126
3127 /* Call check_typedef on our type to make sure that, if TYPE
3128 is a TYPE_CODE_TYPEDEF, its length is set to the length
3129 of the target type instead of zero. However, we do not
3130 replace the typedef type by the target type, because we want
3131 to keep the typedef in order to be able to print the type
3132 description correctly. */
3133 check_typedef (type);
3134
3135 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3136 {
3137 /* Handle packed fields.
3138
3139 Create a new value for the bitfield, with bitpos and bitsize
3140 set. If possible, arrange offset and bitpos so that we can
3141 do a single aligned read of the size of the containing type.
3142 Otherwise, adjust offset to the byte containing the first
3143 bit. Assume that the address, offset, and embedded offset
3144 are sufficiently aligned. */
3145
3146 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3147 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3148
3149 v = allocate_value_lazy (type);
3150 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3151 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3152 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3153 v->bitpos = bitpos % container_bitsize;
3154 else
3155 v->bitpos = bitpos % 8;
3156 v->offset = (value_embedded_offset (arg1)
3157 + offset
3158 + (bitpos - v->bitpos) / 8);
3159 set_value_parent (v, arg1);
3160 if (!value_lazy (arg1))
3161 value_fetch_lazy (v);
3162 }
3163 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3164 {
3165 /* This field is actually a base subobject, so preserve the
3166 entire object's contents for later references to virtual
3167 bases, etc. */
3168 LONGEST boffset;
3169
3170 /* Lazy register values with offsets are not supported. */
3171 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3172 value_fetch_lazy (arg1);
3173
3174 /* We special case virtual inheritance here because this
3175 requires access to the contents, which we would rather avoid
3176 for references to ordinary fields of unavailable values. */
3177 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3178 boffset = baseclass_offset (arg_type, fieldno,
3179 value_contents (arg1),
3180 value_embedded_offset (arg1),
3181 value_address (arg1),
3182 arg1);
3183 else
3184 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3185
3186 if (value_lazy (arg1))
3187 v = allocate_value_lazy (value_enclosing_type (arg1));
3188 else
3189 {
3190 v = allocate_value (value_enclosing_type (arg1));
3191 value_contents_copy_raw (v, 0, arg1, 0,
3192 TYPE_LENGTH (value_enclosing_type (arg1)));
3193 }
3194 v->type = type;
3195 v->offset = value_offset (arg1);
3196 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3197 }
3198 else if (NULL != TYPE_DATA_LOCATION (type))
3199 {
3200 /* Field is a dynamic data member. */
3201
3202 gdb_assert (0 == offset);
3203 /* We expect an already resolved data location. */
3204 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3205 /* For dynamic data types, defer memory allocation
3206 until we actually access the value. */
3207 v = allocate_value_lazy (type);
3208 }
3209 else
3210 {
3211 /* Plain old data member */
3212 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3213 / (HOST_CHAR_BIT * unit_size));
3214
3215 /* Lazy register values with offsets are not supported. */
3216 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3217 value_fetch_lazy (arg1);
3218
3219 if (value_lazy (arg1))
3220 v = allocate_value_lazy (type);
3221 else
3222 {
3223 v = allocate_value (type);
3224 value_contents_copy_raw (v, value_embedded_offset (v),
3225 arg1, value_embedded_offset (arg1) + offset,
3226 type_length_units (type));
3227 }
3228 v->offset = (value_offset (arg1) + offset
3229 + value_embedded_offset (arg1));
3230 }
3231 set_value_component_location (v, arg1);
3232 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
3233 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
3234 return v;
3235 }
3236
3237 /* Given a value ARG1 of a struct or union type,
3238 extract and return the value of one of its (non-static) fields.
3239 FIELDNO says which field. */
3240
3241 struct value *
3242 value_field (struct value *arg1, int fieldno)
3243 {
3244 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3245 }
3246
3247 /* Return a non-virtual function as a value.
3248 F is the list of member functions which contains the desired method.
3249 J is an index into F which provides the desired method.
3250
3251 We only use the symbol for its address, so be happy with either a
3252 full symbol or a minimal symbol. */
3253
3254 struct value *
3255 value_fn_field (struct value **arg1p, struct fn_field *f,
3256 int j, struct type *type,
3257 LONGEST offset)
3258 {
3259 struct value *v;
3260 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3261 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3262 struct symbol *sym;
3263 struct bound_minimal_symbol msym;
3264
3265 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3266 if (sym != NULL)
3267 {
3268 memset (&msym, 0, sizeof (msym));
3269 }
3270 else
3271 {
3272 gdb_assert (sym == NULL);
3273 msym = lookup_bound_minimal_symbol (physname);
3274 if (msym.minsym == NULL)
3275 return NULL;
3276 }
3277
3278 v = allocate_value (ftype);
3279 if (sym)
3280 {
3281 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3282 }
3283 else
3284 {
3285 /* The minimal symbol might point to a function descriptor;
3286 resolve it to the actual code address instead. */
3287 struct objfile *objfile = msym.objfile;
3288 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3289
3290 set_value_address (v,
3291 gdbarch_convert_from_func_ptr_addr
3292 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3293 }
3294
3295 if (arg1p)
3296 {
3297 if (type != value_type (*arg1p))
3298 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3299 value_addr (*arg1p)));
3300
3301 /* Move the `this' pointer according to the offset.
3302 VALUE_OFFSET (*arg1p) += offset; */
3303 }
3304
3305 return v;
3306 }
3307
3308 \f
3309
3310 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3311 VALADDR, and store the result in *RESULT.
3312 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3313
3314 Extracting bits depends on endianness of the machine. Compute the
3315 number of least significant bits to discard. For big endian machines,
3316 we compute the total number of bits in the anonymous object, subtract
3317 off the bit count from the MSB of the object to the MSB of the
3318 bitfield, then the size of the bitfield, which leaves the LSB discard
3319 count. For little endian machines, the discard count is simply the
3320 number of bits from the LSB of the anonymous object to the LSB of the
3321 bitfield.
3322
3323 If the field is signed, we also do sign extension. */
3324
3325 static LONGEST
3326 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3327 LONGEST bitpos, LONGEST bitsize)
3328 {
3329 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3330 ULONGEST val;
3331 ULONGEST valmask;
3332 int lsbcount;
3333 LONGEST bytes_read;
3334 LONGEST read_offset;
3335
3336 /* Read the minimum number of bytes required; there may not be
3337 enough bytes to read an entire ULONGEST. */
3338 field_type = check_typedef (field_type);
3339 if (bitsize)
3340 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3341 else
3342 bytes_read = TYPE_LENGTH (field_type);
3343
3344 read_offset = bitpos / 8;
3345
3346 val = extract_unsigned_integer (valaddr + read_offset,
3347 bytes_read, byte_order);
3348
3349 /* Extract bits. See comment above. */
3350
3351 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3352 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3353 else
3354 lsbcount = (bitpos % 8);
3355 val >>= lsbcount;
3356
3357 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3358 If the field is signed, and is negative, then sign extend. */
3359
3360 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3361 {
3362 valmask = (((ULONGEST) 1) << bitsize) - 1;
3363 val &= valmask;
3364 if (!TYPE_UNSIGNED (field_type))
3365 {
3366 if (val & (valmask ^ (valmask >> 1)))
3367 {
3368 val |= ~valmask;
3369 }
3370 }
3371 }
3372
3373 return val;
3374 }
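
/* Worked example: take BITPOS == 10, BITSIZE == 5 and the raw bytes
   { 0x00, 0x6c }, with gdbarch_bits_big_endian false for the target.
   bytes_read is ((10 % 8) + 5 + 7) / 8 == 1 and read_offset is
   10 / 8 == 1, so only the byte 0x6c is read.  lsbcount is 10 % 8 == 2,
   giving (0x6c >> 2) & 0x1f == 27.  For an unsigned field the result is
   27; for a signed field the top bit of the 5-bit mask is set, so the
   value is sign-extended to -5.  */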
3375
3376 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3377 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3378 ORIGINAL_VALUE, which must not be NULL. See
3379 unpack_value_bits_as_long for more details. */
3380
3381 int
3382 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3383 LONGEST embedded_offset, int fieldno,
3384 const struct value *val, LONGEST *result)
3385 {
3386 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3387 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3388 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3389 int bit_offset;
3390
3391 gdb_assert (val != NULL);
3392
3393 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3394 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3395 || !value_bits_available (val, bit_offset, bitsize))
3396 return 0;
3397
3398 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3399 bitpos, bitsize);
3400 return 1;
3401 }
3402
3403 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3404 object at VALADDR. See unpack_bits_as_long for more details. */
3405
3406 LONGEST
3407 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3408 {
3409 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3410 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3411 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3412
3413 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3414 }
3415
3416 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3417 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3418 the contents in DEST_VAL, zero or sign extending if the type of
3419 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3420 VAL. If the VAL's contents required to extract the bitfield from
3421 are unavailable/optimized out, DEST_VAL is correspondingly
3422 marked unavailable/optimized out. */
3423
3424 void
3425 unpack_value_bitfield (struct value *dest_val,
3426 LONGEST bitpos, LONGEST bitsize,
3427 const gdb_byte *valaddr, LONGEST embedded_offset,
3428 const struct value *val)
3429 {
3430 enum bfd_endian byte_order;
3431 int src_bit_offset;
3432 int dst_bit_offset;
3433 struct type *field_type = value_type (dest_val);
3434
3435 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3436
3437 /* First, unpack and sign extend the bitfield as if it was wholly
3438 valid. Optimized out/unavailable bits are read as zero, but
3439 that's OK, as they'll end up marked below. If the VAL is
3440 wholly-invalid we may have skipped allocating its contents,
3441 though. See allocate_optimized_out_value. */
3442 if (valaddr != NULL)
3443 {
3444 LONGEST num;
3445
3446 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3447 bitpos, bitsize);
3448 store_signed_integer (value_contents_raw (dest_val),
3449 TYPE_LENGTH (field_type), byte_order, num);
3450 }
3451
3452 /* Now copy the optimized out / unavailability ranges to the right
3453 bits. */
3454 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3455 if (byte_order == BFD_ENDIAN_BIG)
3456 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3457 else
3458 dst_bit_offset = 0;
3459 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3460 val, src_bit_offset, bitsize);
3461 }
3462
3463 /* Return a new value with type TYPE, which is FIELDNO field of the
3464 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3465 of VAL. If the VAL's contents required to extract the bitfield
3466 from are unavailable/optimized out, the new value is
3467 correspondingly marked unavailable/optimized out. */
3468
3469 struct value *
3470 value_field_bitfield (struct type *type, int fieldno,
3471 const gdb_byte *valaddr,
3472 LONGEST embedded_offset, const struct value *val)
3473 {
3474 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3475 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3476 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3477
3478 unpack_value_bitfield (res_val, bitpos, bitsize,
3479 valaddr, embedded_offset, val);
3480
3481 return res_val;
3482 }
3483
3484 /* Modify the value of a bitfield. ADDR points to a block of memory in
3485 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3486 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3487 indicate which bits (in target bit order) comprise the bitfield.
3488 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3489 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3490
3491 void
3492 modify_field (struct type *type, gdb_byte *addr,
3493 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3494 {
3495 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3496 ULONGEST oword;
3497 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3498 LONGEST bytesize;
3499
3500 /* Normalize BITPOS. */
3501 addr += bitpos / 8;
3502 bitpos %= 8;
3503
3504 /* If a negative fieldval fits in the field in question, chop
3505 off the sign extension bits. */
3506 if ((~fieldval & ~(mask >> 1)) == 0)
3507 fieldval &= mask;
3508
3509 /* Warn if value is too big to fit in the field in question. */
3510 if (0 != (fieldval & ~mask))
3511 {
3512 /* FIXME: would like to include fieldval in the message, but
3513 we don't have a sprintf_longest. */
3514 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3515
3516 /* Truncate it, otherwise adjoining fields may be corrupted. */
3517 fieldval &= mask;
3518 }
3519
3520 /* Ensure no bytes outside of the modified ones get accessed, as doing so
3521 may cause false valgrind reports. */
3522
3523 bytesize = (bitpos + bitsize + 7) / 8;
3524 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3525
3526 /* Shifting for bit field depends on endianness of the target machine. */
3527 if (gdbarch_bits_big_endian (get_type_arch (type)))
3528 bitpos = bytesize * 8 - bitpos - bitsize;
3529
3530 oword &= ~(mask << bitpos);
3531 oword |= fieldval << bitpos;
3532
3533 store_unsigned_integer (addr, bytesize, byte_order, oword);
3534 }
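
/* Worked example: storing FIELDVAL == 2 into a 3-bit field at BITPOS == 5
   of a byte that currently holds 0xff, with gdbarch_bits_big_endian false.
   mask is 0x7, bytesize is (5 + 3 + 7) / 8 == 1, and the update computes
   (0xff & ~(0x7 << 5)) | (2 << 5) == 0x5f, leaving the low five bits
   untouched.  */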
3535 \f
3536 /* Pack NUM into BUF using a target format of TYPE. */
3537
3538 void
3539 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3540 {
3541 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3542 LONGEST len;
3543
3544 type = check_typedef (type);
3545 len = TYPE_LENGTH (type);
3546
3547 switch (TYPE_CODE (type))
3548 {
3549 case TYPE_CODE_INT:
3550 case TYPE_CODE_CHAR:
3551 case TYPE_CODE_ENUM:
3552 case TYPE_CODE_FLAGS:
3553 case TYPE_CODE_BOOL:
3554 case TYPE_CODE_RANGE:
3555 case TYPE_CODE_MEMBERPTR:
3556 store_signed_integer (buf, len, byte_order, num);
3557 break;
3558
3559 case TYPE_CODE_REF:
3560 case TYPE_CODE_PTR:
3561 store_typed_address (buf, type, (CORE_ADDR) num);
3562 break;
3563
3564 default:
3565 error (_("Unexpected type (%d) encountered for integer constant."),
3566 TYPE_CODE (type));
3567 }
3568 }
3569
3570
3571 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3572
3573 static void
3574 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3575 {
3576 LONGEST len;
3577 enum bfd_endian byte_order;
3578
3579 type = check_typedef (type);
3580 len = TYPE_LENGTH (type);
3581 byte_order = gdbarch_byte_order (get_type_arch (type));
3582
3583 switch (TYPE_CODE (type))
3584 {
3585 case TYPE_CODE_INT:
3586 case TYPE_CODE_CHAR:
3587 case TYPE_CODE_ENUM:
3588 case TYPE_CODE_FLAGS:
3589 case TYPE_CODE_BOOL:
3590 case TYPE_CODE_RANGE:
3591 case TYPE_CODE_MEMBERPTR:
3592 store_unsigned_integer (buf, len, byte_order, num);
3593 break;
3594
3595 case TYPE_CODE_REF:
3596 case TYPE_CODE_PTR:
3597 store_typed_address (buf, type, (CORE_ADDR) num);
3598 break;
3599
3600 default:
3601 error (_("Unexpected type (%d) encountered "
3602 "for unsigned integer constant."),
3603 TYPE_CODE (type));
3604 }
3605 }
3606
3607
3608 /* Convert C numbers into newly allocated values. */
3609
3610 struct value *
3611 value_from_longest (struct type *type, LONGEST num)
3612 {
3613 struct value *val = allocate_value (type);
3614
3615 pack_long (value_contents_raw (val), type, num);
3616 return val;
3617 }
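
/* For instance, value_of_internalvar above builds an integer convenience
   value with

     value_from_longest (builtin_type (gdbarch)->builtin_int,
                         var->u.integer.val);

   and any other LONGEST can be wrapped the same way, given a suitable
   integer type.  */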
3618
3619
3620 /* Convert C unsigned numbers into newly allocated values. */
3621
3622 struct value *
3623 value_from_ulongest (struct type *type, ULONGEST num)
3624 {
3625 struct value *val = allocate_value (type);
3626
3627 pack_unsigned_long (value_contents_raw (val), type, num);
3628
3629 return val;
3630 }
3631
3632
3633 /* Create a value representing a pointer of type TYPE to the address
3634 ADDR. */
3635
3636 struct value *
3637 value_from_pointer (struct type *type, CORE_ADDR addr)
3638 {
3639 struct value *val = allocate_value (type);
3640
3641 store_typed_address (value_contents_raw (val),
3642 check_typedef (type), addr);
3643 return val;
3644 }
3645
3646
3647 /* Create a value of type TYPE whose contents come from VALADDR, if it
3648 is non-null, and whose memory address (in the inferior) is
3649 ADDRESS. The type of the created value may differ from the passed
3650 type TYPE. Make sure to retrieve the value's new type after this call.
3651 Note that TYPE is not passed through resolve_dynamic_type; this is
3652 a special API intended for use only by Ada. */
3653
3654 struct value *
3655 value_from_contents_and_address_unresolved (struct type *type,
3656 const gdb_byte *valaddr,
3657 CORE_ADDR address)
3658 {
3659 struct value *v;
3660
3661 if (valaddr == NULL)
3662 v = allocate_value_lazy (type);
3663 else
3664 v = value_from_contents (type, valaddr);
3665 set_value_address (v, address);
3666 VALUE_LVAL (v) = lval_memory;
3667 return v;
3668 }
3669
3670 /* Create a value of type TYPE whose contents come from VALADDR, if it
3671 is non-null, and whose memory address (in the inferior) is
3672 ADDRESS. The type of the created value may differ from the passed
3673 type TYPE. Make sure to retrieve the value's new type after this call. */
3674
3675 struct value *
3676 value_from_contents_and_address (struct type *type,
3677 const gdb_byte *valaddr,
3678 CORE_ADDR address)
3679 {
3680 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3681 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3682 struct value *v;
3683
3684 if (valaddr == NULL)
3685 v = allocate_value_lazy (resolved_type);
3686 else
3687 v = value_from_contents (resolved_type, valaddr);
3688 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3689 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3690 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3691 set_value_address (v, address);
3692 VALUE_LVAL (v) = lval_memory;
3693 return v;
3694 }
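/* Illustrative sketch (assumed names, not from the original source):
   because the created value's type may have been resolved to a
   dynamic variant of TYPE, callers should re-read the type from the
   value rather than reuse TYPE:

       struct value *v
         = value_from_contents_and_address (type, buf, addr);
       struct type *resolved = value_type (v);   (may differ from TYPE)  */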
3695
3696 /* Create a value of type TYPE holding the contents CONTENTS.
3697 The new value is `not_lval'. */
3698
3699 struct value *
3700 value_from_contents (struct type *type, const gdb_byte *contents)
3701 {
3702 struct value *result;
3703
3704 result = allocate_value (type);
3705 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3706 return result;
3707 }
3708
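/* Create a value of floating-point type TYPE holding the host
   DOUBLEST number NUM.  Error if TYPE is not a floating-point
   type.  */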
3709 struct value *
3710 value_from_double (struct type *type, DOUBLEST num)
3711 {
3712 struct value *val = allocate_value (type);
3713 struct type *base_type = check_typedef (type);
3714 enum type_code code = TYPE_CODE (base_type);
3715
3716 if (code == TYPE_CODE_FLT)
3717 {
3718 store_typed_floating (value_contents_raw (val), base_type, num);
3719 }
3720 else
3721 error (_("Unexpected type encountered for floating constant."));
3722
3723 return val;
3724 }
3725
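/* Create a value of decimal floating-point type TYPE whose contents
   are the TYPE_LENGTH (TYPE) target-format bytes at DEC.  */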
3726 struct value *
3727 value_from_decfloat (struct type *type, const gdb_byte *dec)
3728 {
3729 struct value *val = allocate_value (type);
3730
3731 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3732 return val;
3733 }
3734
3735 /* Extract a value from the history file. Input will be of the form
3736 $digits or $$digits. See block comment above 'write_dollar_variable'
3737 for details. */
3738
3739 struct value *
3740 value_from_history_ref (const char *h, const char **endp)
3741 {
3742 int index, len;
3743
3744 if (h[0] == '$')
3745 len = 1;
3746 else
3747 return NULL;
3748
3749 if (h[1] == '$')
3750 len = 2;
3751
3752 /* Find length of numeral string. */
3753 for (; isdigit (h[len]); len++)
3754 ;
3755
3756 /* Make sure numeral string is not part of an identifier. */
3757 if (h[len] == '_' || isalpha (h[len]))
3758 return NULL;
3759
3760 /* Now collect the index value. */
3761 if (h[1] == '$')
3762 {
3763 if (len == 2)
3764 {
3765 /* For some bizarre reason, "$$" is equivalent to "$$1",
3766 rather than to "$$0" as it ought to be! */
3767 index = -1;
3768 *endp += len;
3769 }
3770 else
3771 {
3772 char *local_end;
3773
3774 index = -strtol (&h[2], &local_end, 10);
3775 *endp = local_end;
3776 }
3777 }
3778 else
3779 {
3780 if (len == 1)
3781 {
3782 /* "$" is equivalent to "$0". */
3783 index = 0;
3784 *endp += len;
3785 }
3786 else
3787 {
3788 char *local_end;
3789
3790 index = strtol (&h[1], &local_end, 10);
3791 *endp = local_end;
3792 }
3793 }
3794
3795 return access_value_history (index);
3796 }
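/* For example (derived from the parsing above): "$" and "$3" yield
   the absolute history indices 0 and 3, while "$$" and "$$2" yield
   the relative indices -1 and -2 passed to access_value_history.
   A string such as "$foo" returns NULL because the (empty) digit run
   is followed by an identifier character.  */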
3797
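/* If ARG is a reference implemented as a computed lvalue whose
   lval_funcs provide a coerce_ref method, return that method's
   result; otherwise return NULL.  */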
3798 struct value *
3799 coerce_ref_if_computed (const struct value *arg)
3800 {
3801 const struct lval_funcs *funcs;
3802
3803 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3804 return NULL;
3805
3806 if (value_lval_const (arg) != lval_computed)
3807 return NULL;
3808
3809 funcs = value_computed_funcs (arg);
3810 if (funcs->coerce_ref == NULL)
3811 return NULL;
3812
3813 return funcs->coerce_ref (arg);
3814 }
3815
3816 /* Look at value.h for description. */
3817
3818 struct value *
3819 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3820 const struct type *original_type,
3821 const struct value *original_value)
3822 {
3823 /* Re-adjust type. */
3824 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3825
3826 /* Add embedding info. */
3827 set_value_enclosing_type (value, enc_type);
3828 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3829
3830 /* We may be pointing to an object of some derived type. */
3831 return value_full_object (value, NULL, 0, 0, 0);
3832 }
3833
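/* If ARG is a reference, return a (lazy) value for the object it
   refers to, with the enclosing type and embedded offset adjusted to
   reflect any derived type; otherwise return ARG unchanged.  */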
3834 struct value *
3835 coerce_ref (struct value *arg)
3836 {
3837 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3838 struct value *retval;
3839 struct type *enc_type;
3840
3841 retval = coerce_ref_if_computed (arg);
3842 if (retval)
3843 return retval;
3844
3845 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3846 return arg;
3847
3848 enc_type = check_typedef (value_enclosing_type (arg));
3849 enc_type = TYPE_TARGET_TYPE (enc_type);
3850
3851 retval = value_at_lazy (enc_type,
3852 unpack_pointer (value_type (arg),
3853 value_contents (arg)));
3854 enc_type = value_type (retval);
3855 return readjust_indirect_value_type (retval, enc_type,
3856 value_type_arg_tmp, arg);
3857 }
3858
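/* Coerce ARG for use as an rvalue: dereference references, decay
   non-vector arrays to pointers when the current language uses
   C-style arrays, and convert functions to pointers to functions.  */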
3859 struct value *
3860 coerce_array (struct value *arg)
3861 {
3862 struct type *type;
3863
3864 arg = coerce_ref (arg);
3865 type = check_typedef (value_type (arg));
3866
3867 switch (TYPE_CODE (type))
3868 {
3869 case TYPE_CODE_ARRAY:
3870 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3871 arg = value_coerce_array (arg);
3872 break;
3873 case TYPE_CODE_FUNC:
3874 arg = value_coerce_function (arg);
3875 break;
3876 }
3877 return arg;
3878 }
3879 \f
3880
3881 /* Return the return value convention that will be used for the
3882 specified type. */
3883
3884 enum return_value_convention
3885 struct_return_convention (struct gdbarch *gdbarch,
3886 struct value *function, struct type *value_type)
3887 {
3888 enum type_code code = TYPE_CODE (value_type);
3889
3890 if (code == TYPE_CODE_ERROR)
3891 error (_("Function return type unknown."));
3892
3893 /* Probe the architecture for the return-value convention. */
3894 return gdbarch_return_value (gdbarch, function, value_type,
3895 NULL, NULL, NULL);
3896 }
3897
3898 /* Return true if the function returning the specified type is using
3899 the convention of returning structures in memory (passing in the
3900 address as a hidden first parameter). */
3901
3902 int
3903 using_struct_return (struct gdbarch *gdbarch,
3904 struct value *function, struct type *value_type)
3905 {
3906 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3907 /* A void return value is never in memory. See also corresponding
3908 code in "print_return_value". */
3909 return 0;
3910
3911 return (struct_return_convention (gdbarch, function, value_type)
3912 != RETURN_VALUE_REGISTER_CONVENTION);
3913 }
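/* Illustrative sketch (not from the original source): call-setup code
   typically uses this predicate to decide whether to reserve target
   memory for the result and pass its address as a hidden first
   argument, roughly:

       if (using_struct_return (gdbarch, function, value_type))
         ... allocate space for the return value and remember its
             address to pass to the callee ...  */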
3914
3915 /* Set the initialized field in a value struct. */
3916
3917 void
3918 set_value_initialized (struct value *val, int status)
3919 {
3920 val->initialized = status;
3921 }
3922
3923 /* Return the initialized field in a value struct. */
3924
3925 int
3926 value_initialized (const struct value *val)
3927 {
3928 return val->initialized;
3929 }
3930
3931 /* Load the actual content of a lazy value. Fetch the data from the
3932 user's process and clear the lazy flag to indicate that the data in
3933 the buffer is valid.
3934
3935 If the value is zero-length, we avoid calling read_memory, which
3936 would abort. We mark the value as fetched anyway -- all 0 bytes of
3937 it. */
3938
3939 void
3940 value_fetch_lazy (struct value *val)
3941 {
3942 gdb_assert (value_lazy (val));
3943 allocate_value_contents (val);
3944 /* A value is either lazy, or fully fetched. The
3945 availability/validity is only established as we try to fetch a
3946 value. */
3947 gdb_assert (VEC_empty (range_s, val->optimized_out));
3948 gdb_assert (VEC_empty (range_s, val->unavailable));
3949 if (value_bitsize (val))
3950 {
3951 /* To read a lazy bitfield, read the entire enclosing value. This
3952 prevents reading the same block of (possibly volatile) memory once
3953 per bitfield. It would be even better to read only the containing
3954 word, but we have no way to record that just specific bits of a
3955 value have been fetched. */
3956 struct type *type = check_typedef (value_type (val));
3957 struct value *parent = value_parent (val);
3958
3959 if (value_lazy (parent))
3960 value_fetch_lazy (parent);
3961
3962 unpack_value_bitfield (val,
3963 value_bitpos (val), value_bitsize (val),
3964 value_contents_for_printing (parent),
3965 value_offset (val), parent);
3966 }
3967 else if (VALUE_LVAL (val) == lval_memory)
3968 {
3969 CORE_ADDR addr = value_address (val);
3970 struct type *type = check_typedef (value_enclosing_type (val));
3971
3972 if (TYPE_LENGTH (type))
3973 read_value_memory (val, 0, value_stack (val),
3974 addr, value_contents_all_raw (val),
3975 type_length_units (type));
3976 }
3977 else if (VALUE_LVAL (val) == lval_register)
3978 {
3979 struct frame_info *frame;
3980 int regnum;
3981 struct type *type = check_typedef (value_type (val));
3982 struct value *new_val = val, *mark = value_mark ();
3983
3984 /* Offsets are not supported here; lazy register values must
3985 refer to the entire register. */
3986 gdb_assert (value_offset (val) == 0);
3987
3988 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3989 {
3990 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3991
3992 frame = frame_find_by_id (frame_id);
3993 regnum = VALUE_REGNUM (new_val);
3994
3995 gdb_assert (frame != NULL);
3996
3997 /* Convertible register routines are used for multi-register
3998 values and for interpretation in different types
3999 (e.g. float or int from a double register). Lazy
4000 register values should have the register's natural type,
4001 so they do not apply. */
4002 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
4003 regnum, type));
4004
4005 new_val = get_frame_register_value (frame, regnum);
4006
4007 /* If we get another lazy lval_register value, it means the
4008 register is found by reading it from the next frame.
4009 get_frame_register_value should never return a value with
4010 the frame id pointing to FRAME. If it does, it means we
4011 either have two consecutive frames with the same frame id
4012 in the frame chain, or some code is trying to unwind
4013 behind get_prev_frame's back (e.g., a frame unwind
4014 sniffer trying to unwind), bypassing its validations. In
4015 any case, it should always be an internal error to end up
4016 in this situation. */
4017 if (VALUE_LVAL (new_val) == lval_register
4018 && value_lazy (new_val)
4019 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
4020 internal_error (__FILE__, __LINE__,
4021 _("infinite loop while fetching a register"));
4022 }
4023
4024 /* If it's still lazy (for instance, a saved register on the
4025 stack), fetch it. */
4026 if (value_lazy (new_val))
4027 value_fetch_lazy (new_val);
4028
4029 /* Copy the contents and the unavailability/optimized-out
4030 meta-data from NEW_VAL to VAL. */
4031 set_value_lazy (val, 0);
4032 value_contents_copy (val, value_embedded_offset (val),
4033 new_val, value_embedded_offset (new_val),
4034 type_length_units (type));
4035
4036 if (frame_debug)
4037 {
4038 struct gdbarch *gdbarch;
4039 frame = frame_find_by_id (VALUE_FRAME_ID (val));
4040 regnum = VALUE_REGNUM (val);
4041 gdbarch = get_frame_arch (frame);
4042
4043 fprintf_unfiltered (gdb_stdlog,
4044 "{ value_fetch_lazy "
4045 "(frame=%d,regnum=%d(%s),...) ",
4046 frame_relative_level (frame), regnum,
4047 user_reg_map_regnum_to_name (gdbarch, regnum));
4048
4049 fprintf_unfiltered (gdb_stdlog, "->");
4050 if (value_optimized_out (new_val))
4051 {
4052 fprintf_unfiltered (gdb_stdlog, " ");
4053 val_print_optimized_out (new_val, gdb_stdlog);
4054 }
4055 else
4056 {
4057 int i;
4058 const gdb_byte *buf = value_contents (new_val);
4059
4060 if (VALUE_LVAL (new_val) == lval_register)
4061 fprintf_unfiltered (gdb_stdlog, " register=%d",
4062 VALUE_REGNUM (new_val));
4063 else if (VALUE_LVAL (new_val) == lval_memory)
4064 fprintf_unfiltered (gdb_stdlog, " address=%s",
4065 paddress (gdbarch,
4066 value_address (new_val)));
4067 else
4068 fprintf_unfiltered (gdb_stdlog, " computed");
4069
4070 fprintf_unfiltered (gdb_stdlog, " bytes=");
4071 fprintf_unfiltered (gdb_stdlog, "[");
4072 for (i = 0; i < register_size (gdbarch, regnum); i++)
4073 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4074 fprintf_unfiltered (gdb_stdlog, "]");
4075 }
4076
4077 fprintf_unfiltered (gdb_stdlog, " }\n");
4078 }
4079
4080 /* Dispose of the intermediate values. This prevents
4081 watchpoints from trying to watch the saved frame pointer. */
4082 value_free_to_mark (mark);
4083 }
4084 else if (VALUE_LVAL (val) == lval_computed
4085 && value_computed_funcs (val)->read != NULL)
4086 value_computed_funcs (val)->read (val);
4087 else
4088 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4089
4090 set_value_lazy (val, 0);
4091 }
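/* Typical caller pattern (sketch; the same idiom is used above for
   the bitfield's parent value):

       if (value_lazy (val))
         value_fetch_lazy (val);

   after which the value's contents buffer reflects the target
   data.  */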
4092
4093 /* Implementation of the convenience function $_isvoid. */
4094
4095 static struct value *
4096 isvoid_internal_fn (struct gdbarch *gdbarch,
4097 const struct language_defn *language,
4098 void *cookie, int argc, struct value **argv)
4099 {
4100 int ret;
4101
4102 if (argc != 1)
4103 error (_("You must provide one argument for $_isvoid."));
4104
4105 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
4106
4107 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4108 }
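/* Example usage from the CLI (sketch; value-history numbers are
   illustrative):

       (gdb) print $_isvoid ($unset_convenience_var)
       $1 = 1
       (gdb) print $_isvoid (1 + 2)
       $2 = 0

   An unset convenience variable has type void, hence the 1.  */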
4109
4110 void
4111 _initialize_values (void)
4112 {
4113 add_cmd ("convenience", no_class, show_convenience, _("\
4114 Debugger convenience (\"$foo\") variables and functions.\n\
4115 Convenience variables are created when you assign them values;\n\
4116 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4117 \n\
4118 A few convenience variables are given values automatically:\n\
4119 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4120 \"$__\" holds the contents of the last address examined with \"x\"."
4121 #ifdef HAVE_PYTHON
4122 "\n\n\
4123 Convenience functions are defined via the Python API."
4124 #endif
4125 ), &showlist);
4126 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4127
4128 add_cmd ("values", no_set_class, show_values, _("\
4129 Elements of value history around item number IDX (or last ten)."),
4130 &showlist);
4131
4132 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4133 Initialize a convenience variable if necessary.\n\
4134 init-if-undefined VARIABLE = EXPRESSION\n\
4135 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4136 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4137 VARIABLE is already initialized."));
4138
4139 add_prefix_cmd ("function", no_class, function_command, _("\
4140 Placeholder command for showing help on convenience functions."),
4141 &functionlist, "function ", 0, &cmdlist);
4142
4143 add_internal_function ("_isvoid", _("\
4144 Check whether an expression is void.\n\
4145 Usage: $_isvoid (expression)\n\
4146 Return 1 if the expression is void, zero otherwise."),
4147 isvoid_internal_fn, NULL);
4148
4149 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4150 class_support, &max_value_size, _("\
4151 Set maximum sized value gdb will load from the inferior."), _("\
4152 Show maximum sized value gdb will load from the inferior."), _("\
4153 Use this to control the maximum size, in bytes, of a value that gdb\n\
4154 will load from the inferior. Setting this value to 'unlimited'\n\
4155 disables checking.\n\
4156 Setting this does not invalidate already allocated values, it only\n\
4157 prevents future values, larger than this size, from being allocated."),
4158 set_max_value_size,
4159 show_max_value_size,
4160 &setlist, &showlist);
4161 }