1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "arch-utils.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
40 #include "tracepoint.h"
42 #include "user-regs.h"
44 /* Prototypes for exported functions. */
46 void _initialize_values (void);
48 /* Definition of a user function. */
49 struct internal_function
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
57 internal_function_fn handler
;
59 /* User data for the handler. */
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

DEF_VEC_O(range_s);
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
82 ranges_overlap (int offset1
, int len1
,
83 int offset2
, int len2
)
87 l
= max (offset1
, offset2
);
88 h
= min (offset1
+ len1
, offset2
+ len2
);
92 /* Returns true if the first argument is strictly less than the
93 second, useful for VEC_lower_bound. We keep ranges sorted by
94 offset and coalesce overlapping and contiguous ranges, so this just
95 compares the starting offset. */
98 range_lessthan (const range_s
*r1
, const range_s
*r2
)
100 return r1
->offset
< r2
->offset
;
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 ranges_contain (VEC(range_s
) *ranges
, int offset
, int length
)
112 what
.offset
= offset
;
113 what
.length
= length
;
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range afterall), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
125 |---| |---| |------| ... |--|
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
135 Then we need to check if the I range overlaps the I range itself.
140 |---| |---| |-------| ... |--|
146 i
= VEC_lower_bound (range_s
, ranges
, &what
, range_lessthan
);
150 struct range
*bef
= VEC_index (range_s
, ranges
, i
- 1);
152 if (ranges_overlap (bef
->offset
, bef
->length
, offset
, length
))
156 if (i
< VEC_length (range_s
, ranges
))
158 struct range
*r
= VEC_index (range_s
, ranges
, i
);
160 if (ranges_overlap (r
->offset
, r
->length
, offset
, length
))
/* Command list head for user-defined "function" commands
   (NOTE(review): presumably populated in _initialize_values --
   registration code is outside this chunk).  */
static struct cmd_list_element *functionlist;
169 /* Note that the fields in this structure are arranged to save a bit
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 unsigned int modifiable
: 1;
181 /* If zero, contents of this value are in the contents field. If
182 nonzero, contents are in inferior. If the lval field is lval_memory,
183 the contents are in inferior memory at location.address plus offset.
184 The lval field may also be lval_register.
186 WARNING: This field is used by the code which handles watchpoints
187 (see breakpoint.c) to decide whether a particular value can be
188 watched by hardware watchpoints. If the lazy flag is set for
189 some member of a value chain, it is assumed that this member of
190 the chain doesn't need to be watched as part of watching the
191 value itself. This is how GDB avoids watching the entire struct
192 or array when the user wants to watch a single struct member or
193 array element. If you ever change the way lazy flag is set and
194 reset, be sure to consider this use as well! */
195 unsigned int lazy
: 1;
197 /* If value is a variable, is it initialized or not. */
198 unsigned int initialized
: 1;
200 /* If value is from the stack. If this is set, read_stack will be
201 used instead of read_memory to enable extra caching. */
202 unsigned int stack
: 1;
204 /* If the value has been released. */
205 unsigned int released
: 1;
207 /* Register number if the value is from a register. */
210 /* Location of value (if lval). */
213 /* If lval == lval_memory, this is the address in the inferior.
214 If lval == lval_register, this is the byte offset into the
215 registers structure. */
218 /* Pointer to internal variable. */
219 struct internalvar
*internalvar
;
221 /* Pointer to xmethod worker. */
222 struct xmethod_worker
*xm_worker
;
224 /* If lval == lval_computed, this is a set of function pointers
225 to use to access and describe the value, and a closure pointer
229 /* Functions to call. */
230 const struct lval_funcs
*funcs
;
232 /* Closure for those functions to use. */
237 /* Describes offset of a value within lval of a structure in bytes.
238 If lval == lval_memory, this is an offset to the address. If
239 lval == lval_register, this is a further offset from
240 location.address within the registers structure. Note also the
241 member embedded_offset below. */
244 /* Only used for bitfields; number of bits contained in them. */
247 /* Only used for bitfields; position of start of field. For
248 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
249 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
252 /* The number of references to this value. When a value is created,
253 the value chain holds a reference, so REFERENCE_COUNT is 1. If
254 release_value is called, this value is removed from the chain but
255 the caller of release_value now has a reference to this value.
256 The caller must arrange for a call to value_free later. */
259 /* Only used for bitfields; the containing value. This allows a
260 single read from the target when displaying multiple
262 struct value
*parent
;
264 /* Frame register value is relative to. This will be described in
265 the lval enum above as "lval_register". */
266 struct frame_id frame_id
;
268 /* Type of the value. */
271 /* If a value represents a C++ object, then the `type' field gives
272 the object's compile-time type. If the object actually belongs
273 to some class derived from `type', perhaps with other base
274 classes and additional members, then `type' is just a subobject
275 of the real thing, and the full object is probably larger than
276 `type' would suggest.
278 If `type' is a dynamic class (i.e. one with a vtable), then GDB
279 can actually determine the object's run-time type by looking at
280 the run-time type information in the vtable. When this
281 information is available, we may elect to read in the entire
282 object, for several reasons:
284 - When printing the value, the user would probably rather see the
285 full object, not just the limited portion apparent from the
288 - If `type' has virtual base classes, then even printing `type'
289 alone may require reaching outside the `type' portion of the
290 object to wherever the virtual base class has been stored.
292 When we store the entire object, `enclosing_type' is the run-time
293 type -- the complete object -- and `embedded_offset' is the
294 offset of `type' within that larger type, in bytes. The
295 value_contents() macro takes `embedded_offset' into account, so
296 most GDB code continues to see the `type' portion of the value,
297 just as the inferior would.
299 If `type' is a pointer to an object, then `enclosing_type' is a
300 pointer to the object's run-time type, and `pointed_to_offset' is
301 the offset in bytes from the full object to the pointed-to object
302 -- that is, the value `embedded_offset' would have if we followed
303 the pointer and fetched the complete object. (I don't really see
304 the point. Why not just determine the run-time type when you
305 indirect, and avoid the special case? The contents don't matter
306 until you indirect anyway.)
308 If we're not doing anything fancy, `enclosing_type' is equal to
309 `type', and `embedded_offset' is zero, so everything works
311 struct type
*enclosing_type
;
313 int pointed_to_offset
;
315 /* Values are stored in a chain, so that they can be deleted easily
316 over calls to the inferior. Values assigned to internal
317 variables, put into the value history or exposed to Python are
318 taken off this list. */
321 /* Actual contents of the value. Target byte-order. NULL or not
322 valid if lazy is nonzero. */
325 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
326 rather than available, since the common and default case is for a
327 value to be available. This is filled in at value read time.
328 The unavailable ranges are tracked in bits. Note that a contents
329 bit that has been optimized out doesn't really exist in the
330 program, so it can't be marked unavailable either. */
331 VEC(range_s
) *unavailable
;
333 /* Likewise, but for optimized out contents (a chunk of the value of
334 a variable that does not actually exist in the program). If LVAL
335 is lval_register, this is a register ($pc, $sp, etc., never a
336 program variable) that has not been saved in the frame. Not
337 saved registers and optimized-out program variables values are
338 treated pretty much the same, except not-saved registers have a
339 different string representation and related error strings. */
340 VEC(range_s
) *optimized_out
;
344 value_bits_available (const struct value
*value
, int offset
, int length
)
346 gdb_assert (!value
->lazy
);
348 return !ranges_contain (value
->unavailable
, offset
, length
);
352 value_bytes_available (const struct value
*value
, int offset
, int length
)
354 return value_bits_available (value
,
355 offset
* TARGET_CHAR_BIT
,
356 length
* TARGET_CHAR_BIT
);
360 value_bits_any_optimized_out (const struct value
*value
, int bit_offset
, int bit_length
)
362 gdb_assert (!value
->lazy
);
364 return ranges_contain (value
->optimized_out
, bit_offset
, bit_length
);
368 value_entirely_available (struct value
*value
)
370 /* We can only tell whether the whole value is available when we try
373 value_fetch_lazy (value
);
375 if (VEC_empty (range_s
, value
->unavailable
))
380 /* Returns true if VALUE is entirely covered by RANGES. If the value
381 is lazy, it'll be read now. Note that RANGE is a pointer to
382 pointer because reading the value might change *RANGE. */
385 value_entirely_covered_by_range_vector (struct value
*value
,
386 VEC(range_s
) **ranges
)
388 /* We can only tell whether the whole value is optimized out /
389 unavailable when we try to read it. */
391 value_fetch_lazy (value
);
393 if (VEC_length (range_s
, *ranges
) == 1)
395 struct range
*t
= VEC_index (range_s
, *ranges
, 0);
398 && t
->length
== (TARGET_CHAR_BIT
399 * TYPE_LENGTH (value_enclosing_type (value
))))
407 value_entirely_unavailable (struct value
*value
)
409 return value_entirely_covered_by_range_vector (value
, &value
->unavailable
);
413 value_entirely_optimized_out (struct value
*value
)
415 return value_entirely_covered_by_range_vector (value
, &value
->optimized_out
);
418 /* Insert into the vector pointed to by VECTORP the bit range starting of
419 OFFSET bits, and extending for the next LENGTH bits. */
422 insert_into_bit_range_vector (VEC(range_s
) **vectorp
, int offset
, int length
)
427 /* Insert the range sorted. If there's overlap or the new range
428 would be contiguous with an existing range, merge. */
430 newr
.offset
= offset
;
431 newr
.length
= length
;
433 /* Do a binary search for the position the given range would be
434 inserted if we only considered the starting OFFSET of ranges.
435 Call that position I. Since we also have LENGTH to care for
436 (this is a range afterall), we need to check if the _previous_
437 range overlaps the I range. E.g., calling R the new range:
439 #1 - overlaps with previous
443 |---| |---| |------| ... |--|
448 In the case #1 above, the binary search would return `I=1',
449 meaning, this OFFSET should be inserted at position 1, and the
450 current position 1 should be pushed further (and become 2). But,
451 note that `0' overlaps with R, so we want to merge them.
453 A similar consideration needs to be taken if the new range would
454 be contiguous with the previous range:
456 #2 - contiguous with previous
460 |--| |---| |------| ... |--|
465 If there's no overlap with the previous range, as in:
467 #3 - not overlapping and not contiguous
471 |--| |---| |------| ... |--|
478 #4 - R is the range with lowest offset
482 |--| |---| |------| ... |--|
487 ... we just push the new range to I.
489 All the 4 cases above need to consider that the new range may
490 also overlap several of the ranges that follow, or that R may be
491 contiguous with the following range, and merge. E.g.,
493 #5 - overlapping following ranges
496 |------------------------|
497 |--| |---| |------| ... |--|
506 |--| |---| |------| ... |--|
513 i
= VEC_lower_bound (range_s
, *vectorp
, &newr
, range_lessthan
);
516 struct range
*bef
= VEC_index (range_s
, *vectorp
, i
- 1);
518 if (ranges_overlap (bef
->offset
, bef
->length
, offset
, length
))
521 ULONGEST l
= min (bef
->offset
, offset
);
522 ULONGEST h
= max (bef
->offset
+ bef
->length
, offset
+ length
);
528 else if (offset
== bef
->offset
+ bef
->length
)
531 bef
->length
+= length
;
537 VEC_safe_insert (range_s
, *vectorp
, i
, &newr
);
543 VEC_safe_insert (range_s
, *vectorp
, i
, &newr
);
546 /* Check whether the ranges following the one we've just added or
547 touched can be folded in (#5 above). */
548 if (i
+ 1 < VEC_length (range_s
, *vectorp
))
555 /* Get the range we just touched. */
556 t
= VEC_index (range_s
, *vectorp
, i
);
560 for (; VEC_iterate (range_s
, *vectorp
, i
, r
); i
++)
561 if (r
->offset
<= t
->offset
+ t
->length
)
565 l
= min (t
->offset
, r
->offset
);
566 h
= max (t
->offset
+ t
->length
, r
->offset
+ r
->length
);
575 /* If we couldn't merge this one, we won't be able to
576 merge following ones either, since the ranges are
577 always sorted by OFFSET. */
582 VEC_block_remove (range_s
, *vectorp
, next
, removed
);
587 mark_value_bits_unavailable (struct value
*value
, int offset
, int length
)
589 insert_into_bit_range_vector (&value
->unavailable
, offset
, length
);
593 mark_value_bytes_unavailable (struct value
*value
, int offset
, int length
)
595 mark_value_bits_unavailable (value
,
596 offset
* TARGET_CHAR_BIT
,
597 length
* TARGET_CHAR_BIT
);
600 /* Find the first range in RANGES that overlaps the range defined by
601 OFFSET and LENGTH, starting at element POS in the RANGES vector,
602 Returns the index into RANGES where such overlapping range was
603 found, or -1 if none was found. */
606 find_first_range_overlap (VEC(range_s
) *ranges
, int pos
,
607 int offset
, int length
)
612 for (i
= pos
; VEC_iterate (range_s
, ranges
, i
, r
); i
++)
613 if (ranges_overlap (r
->offset
, r
->length
, offset
, length
))
619 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
620 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
623 It must always be the case that:
624 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
626 It is assumed that memory can be accessed from:
627 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
629 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
630 / TARGET_CHAR_BIT) */
632 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
633 const gdb_byte
*ptr2
, size_t offset2_bits
,
636 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
637 == offset2_bits
% TARGET_CHAR_BIT
);
639 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
642 gdb_byte mask
, b1
, b2
;
644 /* The offset from the base pointers PTR1 and PTR2 is not a complete
645 number of bytes. A number of bits up to either the next exact
646 byte boundary, or LENGTH_BITS (which ever is sooner) will be
648 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
649 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
650 mask
= (1 << bits
) - 1;
652 if (length_bits
< bits
)
654 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
658 /* Now load the two bytes and mask off the bits we care about. */
659 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
660 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
665 /* Now update the length and offsets to take account of the bits
666 we've just compared. */
668 offset1_bits
+= bits
;
669 offset2_bits
+= bits
;
672 if (length_bits
% TARGET_CHAR_BIT
!= 0)
676 gdb_byte mask
, b1
, b2
;
678 /* The length is not an exact number of bytes. After the previous
679 IF.. block then the offsets are byte aligned, or the
680 length is zero (in which case this code is not reached). Compare
681 a number of bits at the end of the region, starting from an exact
683 bits
= length_bits
% TARGET_CHAR_BIT
;
684 o1
= offset1_bits
+ length_bits
- bits
;
685 o2
= offset2_bits
+ length_bits
- bits
;
687 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
688 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
690 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
691 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
693 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
694 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
704 /* We've now taken care of any stray "bits" at the start, or end of
705 the region to compare, the remainder can be covered with a simple
707 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
708 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
709 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
711 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
712 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
713 length_bits
/ TARGET_CHAR_BIT
);
716 /* Length is zero, regions match. */
720 /* Helper struct for find_first_range_overlap_and_match and
721 value_contents_bits_eq. Keep track of which slot of a given ranges
722 vector have we last looked at. */
724 struct ranges_and_idx
727 VEC(range_s
) *ranges
;
729 /* The range we've last found in RANGES. Given ranges are sorted,
730 we can start the next lookup here. */
734 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
735 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
736 ranges starting at OFFSET2 bits. Return true if the ranges match
737 and fill in *L and *H with the overlapping window relative to
738 (both) OFFSET1 or OFFSET2. */
741 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
742 struct ranges_and_idx
*rp2
,
743 int offset1
, int offset2
,
744 int length
, ULONGEST
*l
, ULONGEST
*h
)
746 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
748 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
751 if (rp1
->idx
== -1 && rp2
->idx
== -1)
757 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
765 r1
= VEC_index (range_s
, rp1
->ranges
, rp1
->idx
);
766 r2
= VEC_index (range_s
, rp2
->ranges
, rp2
->idx
);
768 /* Get the unavailable windows intersected by the incoming
769 ranges. The first and last ranges that overlap the argument
770 range may be wider than said incoming arguments ranges. */
771 l1
= max (offset1
, r1
->offset
);
772 h1
= min (offset1
+ length
, r1
->offset
+ r1
->length
);
774 l2
= max (offset2
, r2
->offset
);
775 h2
= min (offset2
+ length
, offset2
+ r2
->length
);
777 /* Make them relative to the respective start offsets, so we can
778 compare them for equality. */
785 /* Different ranges, no match. */
786 if (l1
!= l2
|| h1
!= h2
)
795 /* Helper function for value_contents_eq. The only difference is that
796 this function is bit rather than byte based.
798 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
799 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
800 Return true if the available bits match. */
803 value_contents_bits_eq (const struct value
*val1
, int offset1
,
804 const struct value
*val2
, int offset2
,
807 /* Each array element corresponds to a ranges source (unavailable,
808 optimized out). '1' is for VAL1, '2' for VAL2. */
809 struct ranges_and_idx rp1
[2], rp2
[2];
811 /* See function description in value.h. */
812 gdb_assert (!val1
->lazy
&& !val2
->lazy
);
814 /* We shouldn't be trying to compare past the end of the values. */
815 gdb_assert (offset1
+ length
816 <= TYPE_LENGTH (val1
->enclosing_type
) * TARGET_CHAR_BIT
);
817 gdb_assert (offset2
+ length
818 <= TYPE_LENGTH (val2
->enclosing_type
) * TARGET_CHAR_BIT
);
820 memset (&rp1
, 0, sizeof (rp1
));
821 memset (&rp2
, 0, sizeof (rp2
));
822 rp1
[0].ranges
= val1
->unavailable
;
823 rp2
[0].ranges
= val2
->unavailable
;
824 rp1
[1].ranges
= val1
->optimized_out
;
825 rp2
[1].ranges
= val2
->optimized_out
;
829 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
832 for (i
= 0; i
< 2; i
++)
834 ULONGEST l_tmp
, h_tmp
;
836 /* The contents only match equal if the invalid/unavailable
837 contents ranges match as well. */
838 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
839 offset1
, offset2
, length
,
843 /* We're interested in the lowest/first range found. */
844 if (i
== 0 || l_tmp
< l
)
851 /* Compare the available/valid contents. */
852 if (memcmp_with_bit_offsets (val1
->contents
, offset1
,
853 val2
->contents
, offset2
, l
) != 0)
865 value_contents_eq (const struct value
*val1
, int offset1
,
866 const struct value
*val2
, int offset2
,
869 return value_contents_bits_eq (val1
, offset1
* TARGET_CHAR_BIT
,
870 val2
, offset2
* TARGET_CHAR_BIT
,
871 length
* TARGET_CHAR_BIT
);
/* Prototypes for local functions.  */

static void show_values (char *, int);

static void show_convenience (char *, int);


/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  struct value_history_chunk *next;
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */


/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static struct value *all_values;
908 /* Allocate a lazy value for type TYPE. Its actual content is
909 "lazily" allocated too: the content field of the return value is
910 NULL; it will be allocated when it is fetched from the target. */
913 allocate_value_lazy (struct type
*type
)
917 /* Call check_typedef on our type to make sure that, if TYPE
918 is a TYPE_CODE_TYPEDEF, its length is set to the length
919 of the target type instead of zero. However, we do not
920 replace the typedef type by the target type, because we want
921 to keep the typedef in order to be able to set the VAL's type
922 description correctly. */
923 check_typedef (type
);
925 val
= (struct value
*) xzalloc (sizeof (struct value
));
926 val
->contents
= NULL
;
927 val
->next
= all_values
;
930 val
->enclosing_type
= type
;
931 VALUE_LVAL (val
) = not_lval
;
932 val
->location
.address
= 0;
933 VALUE_FRAME_ID (val
) = null_frame_id
;
937 VALUE_REGNUM (val
) = -1;
939 val
->embedded_offset
= 0;
940 val
->pointed_to_offset
= 0;
942 val
->initialized
= 1; /* Default to initialized. */
944 /* Values start out on the all_values chain. */
945 val
->reference_count
= 1;
950 /* Allocate the contents of VAL if it has not been allocated yet. */
953 allocate_value_contents (struct value
*val
)
956 val
->contents
= (gdb_byte
*) xzalloc (TYPE_LENGTH (val
->enclosing_type
));
959 /* Allocate a value and its contents for type TYPE. */
962 allocate_value (struct type
*type
)
964 struct value
*val
= allocate_value_lazy (type
);
966 allocate_value_contents (val
);
971 /* Allocate a value that has the correct length
972 for COUNT repetitions of type TYPE. */
975 allocate_repeat_value (struct type
*type
, int count
)
977 int low_bound
= current_language
->string_lower_bound
; /* ??? */
978 /* FIXME-type-allocation: need a way to free this type when we are
980 struct type
*array_type
981 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
983 return allocate_value (array_type
);
987 allocate_computed_value (struct type
*type
,
988 const struct lval_funcs
*funcs
,
991 struct value
*v
= allocate_value_lazy (type
);
993 VALUE_LVAL (v
) = lval_computed
;
994 v
->location
.computed
.funcs
= funcs
;
995 v
->location
.computed
.closure
= closure
;
1000 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1003 allocate_optimized_out_value (struct type
*type
)
1005 struct value
*retval
= allocate_value_lazy (type
);
1007 mark_value_bytes_optimized_out (retval
, 0, TYPE_LENGTH (type
));
1008 set_value_lazy (retval
, 0);
1012 /* Accessor methods. */
1015 value_next (struct value
*value
)
1021 value_type (const struct value
*value
)
1026 deprecated_set_value_type (struct value
*value
, struct type
*type
)
1032 value_offset (const struct value
*value
)
1034 return value
->offset
;
1037 set_value_offset (struct value
*value
, int offset
)
1039 value
->offset
= offset
;
1043 value_bitpos (const struct value
*value
)
1045 return value
->bitpos
;
1048 set_value_bitpos (struct value
*value
, int bit
)
1050 value
->bitpos
= bit
;
1054 value_bitsize (const struct value
*value
)
1056 return value
->bitsize
;
1059 set_value_bitsize (struct value
*value
, int bit
)
1061 value
->bitsize
= bit
;
1065 value_parent (struct value
*value
)
1067 return value
->parent
;
1073 set_value_parent (struct value
*value
, struct value
*parent
)
1075 struct value
*old
= value
->parent
;
1077 value
->parent
= parent
;
1079 value_incref (parent
);
1084 value_contents_raw (struct value
*value
)
1086 allocate_value_contents (value
);
1087 return value
->contents
+ value
->embedded_offset
;
1091 value_contents_all_raw (struct value
*value
)
1093 allocate_value_contents (value
);
1094 return value
->contents
;
1098 value_enclosing_type (struct value
*value
)
1100 return value
->enclosing_type
;
1103 /* Look at value.h for description. */
1106 value_actual_type (struct value
*value
, int resolve_simple_types
,
1107 int *real_type_found
)
1109 struct value_print_options opts
;
1110 struct type
*result
;
1112 get_user_print_options (&opts
);
1114 if (real_type_found
)
1115 *real_type_found
= 0;
1116 result
= value_type (value
);
1117 if (opts
.objectprint
)
1119 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1120 fetch its rtti type. */
1121 if ((TYPE_CODE (result
) == TYPE_CODE_PTR
1122 || TYPE_CODE (result
) == TYPE_CODE_REF
)
1123 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result
)))
1124 == TYPE_CODE_STRUCT
)
1126 struct type
*real_type
;
1128 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1131 if (real_type_found
)
1132 *real_type_found
= 1;
1136 else if (resolve_simple_types
)
1138 if (real_type_found
)
1139 *real_type_found
= 1;
1140 result
= value_enclosing_type (value
);
1148 error_value_optimized_out (void)
1150 error (_("value has been optimized out"));
1154 require_not_optimized_out (const struct value
*value
)
1156 if (!VEC_empty (range_s
, value
->optimized_out
))
1158 if (value
->lval
== lval_register
)
1159 error (_("register has not been saved in frame"));
1161 error_value_optimized_out ();
1166 require_available (const struct value
*value
)
1168 if (!VEC_empty (range_s
, value
->unavailable
))
1169 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1173 value_contents_for_printing (struct value
*value
)
1176 value_fetch_lazy (value
);
1177 return value
->contents
;
1181 value_contents_for_printing_const (const struct value
*value
)
1183 gdb_assert (!value
->lazy
);
1184 return value
->contents
;
1188 value_contents_all (struct value
*value
)
1190 const gdb_byte
*result
= value_contents_for_printing (value
);
1191 require_not_optimized_out (value
);
1192 require_available (value
);
1196 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1197 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1200 ranges_copy_adjusted (VEC (range_s
) **dst_range
, int dst_bit_offset
,
1201 VEC (range_s
) *src_range
, int src_bit_offset
,
1207 for (i
= 0; VEC_iterate (range_s
, src_range
, i
, r
); i
++)
1211 l
= max (r
->offset
, src_bit_offset
);
1212 h
= min (r
->offset
+ r
->length
, src_bit_offset
+ bit_length
);
1215 insert_into_bit_range_vector (dst_range
,
1216 dst_bit_offset
+ (l
- src_bit_offset
),
1221 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1222 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1225 value_ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1226 const struct value
*src
, int src_bit_offset
,
1229 ranges_copy_adjusted (&dst
->unavailable
, dst_bit_offset
,
1230 src
->unavailable
, src_bit_offset
,
1232 ranges_copy_adjusted (&dst
->optimized_out
, dst_bit_offset
,
1233 src
->optimized_out
, src_bit_offset
,
1237 /* Copy LENGTH bytes of SRC value's (all) contents
1238 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1239 contents, starting at DST_OFFSET. If unavailable contents are
1240 being copied from SRC, the corresponding DST contents are marked
1241 unavailable accordingly. Neither DST nor SRC may be lazy
1244 It is assumed the contents of DST in the [DST_OFFSET,
1245 DST_OFFSET+LENGTH) range are wholly available. */
1248 value_contents_copy_raw (struct value
*dst
, int dst_offset
,
1249 struct value
*src
, int src_offset
, int length
)
1253 int src_bit_offset
, dst_bit_offset
, bit_length
;
1255 /* A lazy DST would make that this copy operation useless, since as
1256 soon as DST's contents were un-lazied (by a later value_contents
1257 call, say), the contents would be overwritten. A lazy SRC would
1258 mean we'd be copying garbage. */
1259 gdb_assert (!dst
->lazy
&& !src
->lazy
);
1261 /* The overwritten DST range gets unavailability ORed in, not
1262 replaced. Make sure to remember to implement replacing if it
1263 turns out actually necessary. */
1264 gdb_assert (value_bytes_available (dst
, dst_offset
, length
));
1265 gdb_assert (!value_bits_any_optimized_out (dst
,
1266 TARGET_CHAR_BIT
* dst_offset
,
1267 TARGET_CHAR_BIT
* length
));
1269 /* Copy the data. */
1270 memcpy (value_contents_all_raw (dst
) + dst_offset
,
1271 value_contents_all_raw (src
) + src_offset
,
1274 /* Copy the meta-data, adjusted. */
1275 src_bit_offset
= src_offset
* TARGET_CHAR_BIT
;
1276 dst_bit_offset
= dst_offset
* TARGET_CHAR_BIT
;
1277 bit_length
= length
* TARGET_CHAR_BIT
;
1279 value_ranges_copy_adjusted (dst
, dst_bit_offset
,
1280 src
, src_bit_offset
,
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, int dst_offset,
                     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first so the raw copy below sees real contents.  */
  if (value_lazy (src))
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1305 value_lazy (struct value
*value
)
1311 set_value_lazy (struct value
*value
, int val
)
1317 value_stack (struct value
*value
)
1319 return value
->stack
;
1323 set_value_stack (struct value
*value
, int val
)
1329 value_contents (struct value
*value
)
1331 const gdb_byte
*result
= value_contents_writeable (value
);
1332 require_not_optimized_out (value
);
1333 require_available (value
);
1338 value_contents_writeable (struct value
*value
)
1341 value_fetch_lazy (value
);
1342 return value_contents_raw (value
);
1346 value_optimized_out (struct value
*value
)
1348 /* We can only know if a value is optimized out once we have tried to
1350 if (VEC_empty (range_s
, value
->optimized_out
) && value
->lazy
)
1351 value_fetch_lazy (value
);
1353 return !VEC_empty (range_s
, value
->optimized_out
);
1356 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1357 the following LENGTH bytes. */
1360 mark_value_bytes_optimized_out (struct value
*value
, int offset
, int length
)
1362 mark_value_bits_optimized_out (value
,
1363 offset
* TARGET_CHAR_BIT
,
1364 length
* TARGET_CHAR_BIT
);
1370 mark_value_bits_optimized_out (struct value
*value
, int offset
, int length
)
1372 insert_into_bit_range_vector (&value
->optimized_out
, offset
, length
);
1376 value_bits_synthetic_pointer (const struct value
*value
,
1377 int offset
, int length
)
1379 if (value
->lval
!= lval_computed
1380 || !value
->location
.computed
.funcs
->check_synthetic_pointer
)
1382 return value
->location
.computed
.funcs
->check_synthetic_pointer (value
,
1388 value_embedded_offset (struct value
*value
)
1390 return value
->embedded_offset
;
1394 set_value_embedded_offset (struct value
*value
, int val
)
1396 value
->embedded_offset
= val
;
1400 value_pointed_to_offset (struct value
*value
)
1402 return value
->pointed_to_offset
;
1406 set_value_pointed_to_offset (struct value
*value
, int val
)
1408 value
->pointed_to_offset
= val
;
1411 const struct lval_funcs
*
1412 value_computed_funcs (const struct value
*v
)
1414 gdb_assert (value_lval_const (v
) == lval_computed
);
1416 return v
->location
.computed
.funcs
;
1420 value_computed_closure (const struct value
*v
)
1422 gdb_assert (v
->lval
== lval_computed
);
1424 return v
->location
.computed
.closure
;
1428 deprecated_value_lval_hack (struct value
*value
)
1430 return &value
->lval
;
1434 value_lval_const (const struct value
*value
)
1440 value_address (const struct value
*value
)
1442 if (value
->lval
== lval_internalvar
1443 || value
->lval
== lval_internalvar_component
1444 || value
->lval
== lval_xcallable
)
1446 if (value
->parent
!= NULL
)
1447 return value_address (value
->parent
) + value
->offset
;
1449 return value
->location
.address
+ value
->offset
;
1453 value_raw_address (struct value
*value
)
1455 if (value
->lval
== lval_internalvar
1456 || value
->lval
== lval_internalvar_component
1457 || value
->lval
== lval_xcallable
)
1459 return value
->location
.address
;
1463 set_value_address (struct value
*value
, CORE_ADDR addr
)
1465 gdb_assert (value
->lval
!= lval_internalvar
1466 && value
->lval
!= lval_internalvar_component
1467 && value
->lval
!= lval_xcallable
);
1468 value
->location
.address
= addr
;
1471 struct internalvar
**
1472 deprecated_value_internalvar_hack (struct value
*value
)
1474 return &value
->location
.internalvar
;
1478 deprecated_value_frame_id_hack (struct value
*value
)
1480 return &value
->frame_id
;
1484 deprecated_value_regnum_hack (struct value
*value
)
1486 return &value
->regnum
;
1490 deprecated_value_modifiable (struct value
*value
)
1492 return value
->modifiable
;
1495 /* Return a mark in the value chain. All values allocated after the
1496 mark is obtained (except for those released) are subject to being freed
1497 if a subsequent value_free_to_mark is passed the mark. */
1504 /* Take a reference to VAL. VAL will not be deallocated until all
1505 references are released. */
1508 value_incref (struct value
*val
)
1510 val
->reference_count
++;
1513 /* Release a reference to VAL, which was acquired with value_incref.
1514 This function is also called to deallocate values from the value
1518 value_free (struct value
*val
)
1522 gdb_assert (val
->reference_count
> 0);
1523 val
->reference_count
--;
1524 if (val
->reference_count
> 0)
1527 /* If there's an associated parent value, drop our reference to
1529 if (val
->parent
!= NULL
)
1530 value_free (val
->parent
);
1532 if (VALUE_LVAL (val
) == lval_computed
)
1534 const struct lval_funcs
*funcs
= val
->location
.computed
.funcs
;
1536 if (funcs
->free_closure
)
1537 funcs
->free_closure (val
);
1539 else if (VALUE_LVAL (val
) == lval_xcallable
)
1540 free_xmethod_worker (val
->location
.xm_worker
);
1542 xfree (val
->contents
);
1543 VEC_free (range_s
, val
->unavailable
);
1548 /* Free all values allocated since MARK was obtained by value_mark
1549 (except for those released). */
1551 value_free_to_mark (struct value
*mark
)
1556 for (val
= all_values
; val
&& val
!= mark
; val
= next
)
1565 /* Free all the values that have been allocated (except for those released).
1566 Call after each command, successful or not.
1567 In practice this is called before each command, which is sufficient. */
1570 free_all_values (void)
1575 for (val
= all_values
; val
; val
= next
)
1585 /* Frees all the elements in a chain of values. */
1588 free_value_chain (struct value
*v
)
1594 next
= value_next (v
);
1599 /* Remove VAL from the chain all_values
1600 so it will not be freed automatically. */
1603 release_value (struct value
*val
)
1607 if (all_values
== val
)
1609 all_values
= val
->next
;
1615 for (v
= all_values
; v
; v
= v
->next
)
1619 v
->next
= val
->next
;
1627 /* If the value is not already released, release it.
1628 If the value is already released, increment its reference count.
1629 That is, this function ensures that the value is released from the
1630 value chain and that the caller owns a reference to it. */
1633 release_value_or_incref (struct value
*val
)
1638 release_value (val
);
1641 /* Release all values up to mark */
1643 value_release_to_mark (struct value
*mark
)
1648 for (val
= next
= all_values
; next
; next
= next
->next
)
1650 if (next
->next
== mark
)
1652 all_values
= next
->next
;
1662 /* Return a copy of the value ARG.
1663 It contains the same contents, for same memory address,
1664 but it's a different block of storage. */
1667 value_copy (struct value
*arg
)
1669 struct type
*encl_type
= value_enclosing_type (arg
);
1672 if (value_lazy (arg
))
1673 val
= allocate_value_lazy (encl_type
);
1675 val
= allocate_value (encl_type
);
1676 val
->type
= arg
->type
;
1677 VALUE_LVAL (val
) = VALUE_LVAL (arg
);
1678 val
->location
= arg
->location
;
1679 val
->offset
= arg
->offset
;
1680 val
->bitpos
= arg
->bitpos
;
1681 val
->bitsize
= arg
->bitsize
;
1682 VALUE_FRAME_ID (val
) = VALUE_FRAME_ID (arg
);
1683 VALUE_REGNUM (val
) = VALUE_REGNUM (arg
);
1684 val
->lazy
= arg
->lazy
;
1685 val
->embedded_offset
= value_embedded_offset (arg
);
1686 val
->pointed_to_offset
= arg
->pointed_to_offset
;
1687 val
->modifiable
= arg
->modifiable
;
1688 if (!value_lazy (val
))
1690 memcpy (value_contents_all_raw (val
), value_contents_all_raw (arg
),
1691 TYPE_LENGTH (value_enclosing_type (arg
)));
1694 val
->unavailable
= VEC_copy (range_s
, arg
->unavailable
);
1695 val
->optimized_out
= VEC_copy (range_s
, arg
->optimized_out
);
1696 set_value_parent (val
, arg
->parent
);
1697 if (VALUE_LVAL (val
) == lval_computed
)
1699 const struct lval_funcs
*funcs
= val
->location
.computed
.funcs
;
1701 if (funcs
->copy_closure
)
1702 val
->location
.computed
.closure
= funcs
->copy_closure (val
);
1707 /* Return a version of ARG that is non-lvalue. */
1710 value_non_lval (struct value
*arg
)
1712 if (VALUE_LVAL (arg
) != not_lval
)
1714 struct type
*enc_type
= value_enclosing_type (arg
);
1715 struct value
*val
= allocate_value (enc_type
);
1717 memcpy (value_contents_all_raw (val
), value_contents_all (arg
),
1718 TYPE_LENGTH (enc_type
));
1719 val
->type
= arg
->type
;
1720 set_value_embedded_offset (val
, value_embedded_offset (arg
));
1721 set_value_pointed_to_offset (val
, value_pointed_to_offset (arg
));
1727 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1730 value_force_lval (struct value
*v
, CORE_ADDR addr
)
1732 gdb_assert (VALUE_LVAL (v
) == not_lval
);
1734 write_memory (addr
, value_contents_raw (v
), TYPE_LENGTH (value_type (v
)));
1735 v
->lval
= lval_memory
;
1736 v
->location
.address
= addr
;
1740 set_value_component_location (struct value
*component
,
1741 const struct value
*whole
)
1743 gdb_assert (whole
->lval
!= lval_xcallable
);
1745 if (whole
->lval
== lval_internalvar
)
1746 VALUE_LVAL (component
) = lval_internalvar_component
;
1748 VALUE_LVAL (component
) = whole
->lval
;
1750 component
->location
= whole
->location
;
1751 if (whole
->lval
== lval_computed
)
1753 const struct lval_funcs
*funcs
= whole
->location
.computed
.funcs
;
1755 if (funcs
->copy_closure
)
1756 component
->location
.computed
.closure
= funcs
->copy_closure (whole
);
1761 /* Access to the value history. */
1763 /* Record a new value in the value history.
1764 Returns the absolute history index of the entry. */
1767 record_latest_value (struct value
*val
)
1771 /* We don't want this value to have anything to do with the inferior anymore.
1772 In particular, "set $1 = 50" should not affect the variable from which
1773 the value was taken, and fast watchpoints should be able to assume that
1774 a value on the value history never changes. */
1775 if (value_lazy (val
))
1776 value_fetch_lazy (val
);
1777 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1778 from. This is a bit dubious, because then *&$1 does not just return $1
1779 but the current contents of that location. c'est la vie... */
1780 val
->modifiable
= 0;
1782 /* The value may have already been released, in which case we're adding a
1783 new reference for its entry in the history. That is why we call
1784 release_value_or_incref here instead of release_value. */
1785 release_value_or_incref (val
);
1787 /* Here we treat value_history_count as origin-zero
1788 and applying to the value being stored now. */
1790 i
= value_history_count
% VALUE_HISTORY_CHUNK
;
1793 struct value_history_chunk
*new
1794 = (struct value_history_chunk
*)
1796 xmalloc (sizeof (struct value_history_chunk
));
1797 memset (new->values
, 0, sizeof new->values
);
1798 new->next
= value_history_chain
;
1799 value_history_chain
= new;
1802 value_history_chain
->values
[i
] = val
;
1804 /* Now we regard value_history_count as origin-one
1805 and applying to the value just stored. */
1807 return ++value_history_count
;
1810 /* Return a copy of the value in the history with sequence number NUM. */
1813 access_value_history (int num
)
1815 struct value_history_chunk
*chunk
;
1820 absnum
+= value_history_count
;
1825 error (_("The history is empty."));
1827 error (_("There is only one value in the history."));
1829 error (_("History does not go back to $$%d."), -num
);
1831 if (absnum
> value_history_count
)
1832 error (_("History has not yet reached $%d."), absnum
);
1836 /* Now absnum is always absolute and origin zero. */
1838 chunk
= value_history_chain
;
1839 for (i
= (value_history_count
- 1) / VALUE_HISTORY_CHUNK
1840 - absnum
/ VALUE_HISTORY_CHUNK
;
1842 chunk
= chunk
->next
;
1844 return value_copy (chunk
->values
[absnum
% VALUE_HISTORY_CHUNK
]);
1848 show_values (char *num_exp
, int from_tty
)
1856 /* "show values +" should print from the stored position.
1857 "show values <exp>" should print around value number <exp>. */
1858 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1859 num
= parse_and_eval_long (num_exp
) - 5;
1863 /* "show values" means print the last 10 values. */
1864 num
= value_history_count
- 9;
1870 for (i
= num
; i
< num
+ 10 && i
<= value_history_count
; i
++)
1872 struct value_print_options opts
;
1874 val
= access_value_history (i
);
1875 printf_filtered (("$%d = "), i
);
1876 get_user_print_options (&opts
);
1877 value_print (val
, gdb_stdout
, &opts
);
1878 printf_filtered (("\n"));
1881 /* The next "show values +" should start after what we just printed. */
1884 /* Hitting just return after this command should do the same thing as
1885 "show values +". If num_exp is null, this is unnecessary, since
1886 "show values +" is not useful after "show values". */
1887 if (from_tty
&& num_exp
)
1894 /* Internal variables. These are variables within the debugger
1895 that hold values assigned by debugger commands.
1896 The user refers to them with a '$' prefix
1897 that does not appear in the variable names stored internally. */
1901 struct internalvar
*next
;
1904 /* We support various different kinds of content of an internal variable.
1905 enum internalvar_kind specifies the kind, and union internalvar_data
1906 provides the data associated with this particular kind. */
1908 enum internalvar_kind
1910 /* The internal variable is empty. */
1913 /* The value of the internal variable is provided directly as
1914 a GDB value object. */
1917 /* A fresh value is computed via a call-back routine on every
1918 access to the internal variable. */
1919 INTERNALVAR_MAKE_VALUE
,
1921 /* The internal variable holds a GDB internal convenience function. */
1922 INTERNALVAR_FUNCTION
,
1924 /* The variable holds an integer value. */
1925 INTERNALVAR_INTEGER
,
1927 /* The variable holds a GDB-provided string. */
1932 union internalvar_data
1934 /* A value object used with INTERNALVAR_VALUE. */
1935 struct value
*value
;
1937 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1940 /* The functions to call. */
1941 const struct internalvar_funcs
*functions
;
1943 /* The function's user-data. */
1947 /* The internal function used with INTERNALVAR_FUNCTION. */
1950 struct internal_function
*function
;
1951 /* True if this is the canonical name for the function. */
1955 /* An integer value used with INTERNALVAR_INTEGER. */
1958 /* If type is non-NULL, it will be used as the type to generate
1959 a value for this internal variable. If type is NULL, a default
1960 integer type for the architecture is used. */
1965 /* A string value used with INTERNALVAR_STRING. */
1970 static struct internalvar
*internalvars
;
1972 /* If the variable does not already exist create it and give it the
1973 value given. If no value is given then the default is zero. */
1975 init_if_undefined_command (char* args
, int from_tty
)
1977 struct internalvar
* intvar
;
1979 /* Parse the expression - this is taken from set_command(). */
1980 struct expression
*expr
= parse_expression (args
);
1981 register struct cleanup
*old_chain
=
1982 make_cleanup (free_current_contents
, &expr
);
1984 /* Validate the expression.
1985 Was the expression an assignment?
1986 Or even an expression at all? */
1987 if (expr
->nelts
== 0 || expr
->elts
[0].opcode
!= BINOP_ASSIGN
)
1988 error (_("Init-if-undefined requires an assignment expression."));
1990 /* Extract the variable from the parsed expression.
1991 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1992 if (expr
->elts
[1].opcode
!= OP_INTERNALVAR
)
1993 error (_("The first parameter to init-if-undefined "
1994 "should be a GDB variable."));
1995 intvar
= expr
->elts
[2].internalvar
;
1997 /* Only evaluate the expression if the lvalue is void.
1998 This may still fail if the expresssion is invalid. */
1999 if (intvar
->kind
== INTERNALVAR_VOID
)
2000 evaluate_expression (expr
);
2002 do_cleanups (old_chain
);
2006 /* Look up an internal variable with name NAME. NAME should not
2007 normally include a dollar sign.
2009 If the specified internal variable does not exist,
2010 the return value is NULL. */
2012 struct internalvar
*
2013 lookup_only_internalvar (const char *name
)
2015 struct internalvar
*var
;
2017 for (var
= internalvars
; var
; var
= var
->next
)
2018 if (strcmp (var
->name
, name
) == 0)
2024 /* Complete NAME by comparing it to the names of internal variables.
2025 Returns a vector of newly allocated strings, or NULL if no matches
2029 complete_internalvar (const char *name
)
2031 VEC (char_ptr
) *result
= NULL
;
2032 struct internalvar
*var
;
2035 len
= strlen (name
);
2037 for (var
= internalvars
; var
; var
= var
->next
)
2038 if (strncmp (var
->name
, name
, len
) == 0)
2040 char *r
= xstrdup (var
->name
);
2042 VEC_safe_push (char_ptr
, result
, r
);
2048 /* Create an internal variable with name NAME and with a void value.
2049 NAME should not normally include a dollar sign. */
2051 struct internalvar
*
2052 create_internalvar (const char *name
)
2054 struct internalvar
*var
;
2056 var
= (struct internalvar
*) xmalloc (sizeof (struct internalvar
));
2057 var
->name
= concat (name
, (char *)NULL
);
2058 var
->kind
= INTERNALVAR_VOID
;
2059 var
->next
= internalvars
;
2064 /* Create an internal variable with name NAME and register FUN as the
2065 function that value_of_internalvar uses to create a value whenever
2066 this variable is referenced. NAME should not normally include a
2067 dollar sign. DATA is passed uninterpreted to FUN when it is
2068 called. CLEANUP, if not NULL, is called when the internal variable
2069 is destroyed. It is passed DATA as its only argument. */
2071 struct internalvar
*
2072 create_internalvar_type_lazy (const char *name
,
2073 const struct internalvar_funcs
*funcs
,
2076 struct internalvar
*var
= create_internalvar (name
);
2078 var
->kind
= INTERNALVAR_MAKE_VALUE
;
2079 var
->u
.make_value
.functions
= funcs
;
2080 var
->u
.make_value
.data
= data
;
2084 /* See documentation in value.h. */
2087 compile_internalvar_to_ax (struct internalvar
*var
,
2088 struct agent_expr
*expr
,
2089 struct axs_value
*value
)
2091 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2092 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2095 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2096 var
->u
.make_value
.data
);
2100 /* Look up an internal variable with name NAME. NAME should not
2101 normally include a dollar sign.
2103 If the specified internal variable does not exist,
2104 one is created, with a void value. */
2106 struct internalvar
*
2107 lookup_internalvar (const char *name
)
2109 struct internalvar
*var
;
2111 var
= lookup_only_internalvar (name
);
2115 return create_internalvar (name
);
2118 /* Return current value of internal variable VAR. For variables that
2119 are not inherently typed, use a value type appropriate for GDBARCH. */
2122 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2125 struct trace_state_variable
*tsv
;
2127 /* If there is a trace state variable of the same name, assume that
2128 is what we really want to see. */
2129 tsv
= find_trace_state_variable (var
->name
);
2132 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2134 if (tsv
->value_known
)
2135 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2138 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2144 case INTERNALVAR_VOID
:
2145 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2148 case INTERNALVAR_FUNCTION
:
2149 val
= allocate_value (builtin_type (gdbarch
)->internal_fn
);
2152 case INTERNALVAR_INTEGER
:
2153 if (!var
->u
.integer
.type
)
2154 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2155 var
->u
.integer
.val
);
2157 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2160 case INTERNALVAR_STRING
:
2161 val
= value_cstring (var
->u
.string
, strlen (var
->u
.string
),
2162 builtin_type (gdbarch
)->builtin_char
);
2165 case INTERNALVAR_VALUE
:
2166 val
= value_copy (var
->u
.value
);
2167 if (value_lazy (val
))
2168 value_fetch_lazy (val
);
2171 case INTERNALVAR_MAKE_VALUE
:
2172 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2173 var
->u
.make_value
.data
);
2177 internal_error (__FILE__
, __LINE__
, _("bad kind"));
2180 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2181 on this value go back to affect the original internal variable.
2183 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2184 no underlying modifyable state in the internal variable.
2186 Likewise, if the variable's value is a computed lvalue, we want
2187 references to it to produce another computed lvalue, where
2188 references and assignments actually operate through the
2189 computed value's functions.
2191 This means that internal variables with computed values
2192 behave a little differently from other internal variables:
2193 assignments to them don't just replace the previous value
2194 altogether. At the moment, this seems like the behavior we
2197 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2198 && val
->lval
!= lval_computed
)
2200 VALUE_LVAL (val
) = lval_internalvar
;
2201 VALUE_INTERNALVAR (val
) = var
;
2208 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2210 if (var
->kind
== INTERNALVAR_INTEGER
)
2212 *result
= var
->u
.integer
.val
;
2216 if (var
->kind
== INTERNALVAR_VALUE
)
2218 struct type
*type
= check_typedef (value_type (var
->u
.value
));
2220 if (TYPE_CODE (type
) == TYPE_CODE_INT
)
2222 *result
= value_as_long (var
->u
.value
);
2231 get_internalvar_function (struct internalvar
*var
,
2232 struct internal_function
**result
)
2236 case INTERNALVAR_FUNCTION
:
2237 *result
= var
->u
.fn
.function
;
2246 set_internalvar_component (struct internalvar
*var
, int offset
, int bitpos
,
2247 int bitsize
, struct value
*newval
)
2253 case INTERNALVAR_VALUE
:
2254 addr
= value_contents_writeable (var
->u
.value
);
2257 modify_field (value_type (var
->u
.value
), addr
+ offset
,
2258 value_as_long (newval
), bitpos
, bitsize
);
2260 memcpy (addr
+ offset
, value_contents (newval
),
2261 TYPE_LENGTH (value_type (newval
)));
2265 /* We can never get a component of any other kind. */
2266 internal_error (__FILE__
, __LINE__
, _("set_internalvar_component"));
2271 set_internalvar (struct internalvar
*var
, struct value
*val
)
2273 enum internalvar_kind new_kind
;
2274 union internalvar_data new_data
= { 0 };
2276 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2277 error (_("Cannot overwrite convenience function %s"), var
->name
);
2279 /* Prepare new contents. */
2280 switch (TYPE_CODE (check_typedef (value_type (val
))))
2282 case TYPE_CODE_VOID
:
2283 new_kind
= INTERNALVAR_VOID
;
2286 case TYPE_CODE_INTERNAL_FUNCTION
:
2287 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2288 new_kind
= INTERNALVAR_FUNCTION
;
2289 get_internalvar_function (VALUE_INTERNALVAR (val
),
2290 &new_data
.fn
.function
);
2291 /* Copies created here are never canonical. */
2295 new_kind
= INTERNALVAR_VALUE
;
2296 new_data
.value
= value_copy (val
);
2297 new_data
.value
->modifiable
= 1;
2299 /* Force the value to be fetched from the target now, to avoid problems
2300 later when this internalvar is referenced and the target is gone or
2302 if (value_lazy (new_data
.value
))
2303 value_fetch_lazy (new_data
.value
);
2305 /* Release the value from the value chain to prevent it from being
2306 deleted by free_all_values. From here on this function should not
2307 call error () until new_data is installed into the var->u to avoid
2309 release_value (new_data
.value
);
2313 /* Clean up old contents. */
2314 clear_internalvar (var
);
2317 var
->kind
= new_kind
;
2319 /* End code which must not call error(). */
2323 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2325 /* Clean up old contents. */
2326 clear_internalvar (var
);
2328 var
->kind
= INTERNALVAR_INTEGER
;
2329 var
->u
.integer
.type
= NULL
;
2330 var
->u
.integer
.val
= l
;
2334 set_internalvar_string (struct internalvar
*var
, const char *string
)
2336 /* Clean up old contents. */
2337 clear_internalvar (var
);
2339 var
->kind
= INTERNALVAR_STRING
;
2340 var
->u
.string
= xstrdup (string
);
2344 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2346 /* Clean up old contents. */
2347 clear_internalvar (var
);
2349 var
->kind
= INTERNALVAR_FUNCTION
;
2350 var
->u
.fn
.function
= f
;
2351 var
->u
.fn
.canonical
= 1;
2352 /* Variables installed here are always the canonical version. */
2356 clear_internalvar (struct internalvar
*var
)
2358 /* Clean up old contents. */
2361 case INTERNALVAR_VALUE
:
2362 value_free (var
->u
.value
);
2365 case INTERNALVAR_STRING
:
2366 xfree (var
->u
.string
);
2369 case INTERNALVAR_MAKE_VALUE
:
2370 if (var
->u
.make_value
.functions
->destroy
!= NULL
)
2371 var
->u
.make_value
.functions
->destroy (var
->u
.make_value
.data
);
2378 /* Reset to void kind. */
2379 var
->kind
= INTERNALVAR_VOID
;
2383 internalvar_name (struct internalvar
*var
)
2388 static struct internal_function
*
2389 create_internal_function (const char *name
,
2390 internal_function_fn handler
, void *cookie
)
2392 struct internal_function
*ifn
= XNEW (struct internal_function
);
2394 ifn
->name
= xstrdup (name
);
2395 ifn
->handler
= handler
;
2396 ifn
->cookie
= cookie
;
2401 value_internal_function_name (struct value
*val
)
2403 struct internal_function
*ifn
;
2406 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2407 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2408 gdb_assert (result
);
2414 call_internal_function (struct gdbarch
*gdbarch
,
2415 const struct language_defn
*language
,
2416 struct value
*func
, int argc
, struct value
**argv
)
2418 struct internal_function
*ifn
;
2421 gdb_assert (VALUE_LVAL (func
) == lval_internalvar
);
2422 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2423 gdb_assert (result
);
2425 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
2428 /* The 'function' command. This does nothing -- it is just a
2429 placeholder to let "help function NAME" work. This is also used as
2430 the implementation of the sub-command that is created when
2431 registering an internal function. */
2433 function_command (char *command
, int from_tty
)
2438 /* Clean up if an internal function's command is destroyed. */
2440 function_destroyer (struct cmd_list_element
*self
, void *ignore
)
2442 xfree ((char *) self
->name
);
2443 xfree ((char *) self
->doc
);
2446 /* Add a new internal function. NAME is the name of the function; DOC
2447 is a documentation string describing the function. HANDLER is
2448 called when the function is invoked. COOKIE is an arbitrary
2449 pointer which is passed to HANDLER and is intended for "user
2452 add_internal_function (const char *name
, const char *doc
,
2453 internal_function_fn handler
, void *cookie
)
2455 struct cmd_list_element
*cmd
;
2456 struct internal_function
*ifn
;
2457 struct internalvar
*var
= lookup_internalvar (name
);
2459 ifn
= create_internal_function (name
, handler
, cookie
);
2460 set_internalvar_function (var
, ifn
);
2462 cmd
= add_cmd (xstrdup (name
), no_class
, function_command
, (char *) doc
,
2464 cmd
->destroyer
= function_destroyer
;
2467 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2468 prevent cycles / duplicates. */
2471 preserve_one_value (struct value
*value
, struct objfile
*objfile
,
2472 htab_t copied_types
)
2474 if (TYPE_OBJFILE (value
->type
) == objfile
)
2475 value
->type
= copy_type_recursive (objfile
, value
->type
, copied_types
);
2477 if (TYPE_OBJFILE (value
->enclosing_type
) == objfile
)
2478 value
->enclosing_type
= copy_type_recursive (objfile
,
2479 value
->enclosing_type
,
2483 /* Likewise for internal variable VAR. */
2486 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2487 htab_t copied_types
)
2491 case INTERNALVAR_INTEGER
:
2492 if (var
->u
.integer
.type
&& TYPE_OBJFILE (var
->u
.integer
.type
) == objfile
)
2494 = copy_type_recursive (objfile
, var
->u
.integer
.type
, copied_types
);
2497 case INTERNALVAR_VALUE
:
2498 preserve_one_value (var
->u
.value
, objfile
, copied_types
);
2503 /* Update the internal variables and value history when OBJFILE is
2504 discarded; we must copy the types out of the objfile. New global types
2505 will be created for every convenience variable which currently points to
2506 this objfile's types, and the convenience variables will be adjusted to
2507 use the new global types. */
2510 preserve_values (struct objfile
*objfile
)
2512 htab_t copied_types
;
2513 struct value_history_chunk
*cur
;
2514 struct internalvar
*var
;
2517 /* Create the hash table. We allocate on the objfile's obstack, since
2518 it is soon to be deleted. */
2519 copied_types
= create_copied_types_hash (objfile
);
2521 for (cur
= value_history_chain
; cur
; cur
= cur
->next
)
2522 for (i
= 0; i
< VALUE_HISTORY_CHUNK
; i
++)
2524 preserve_one_value (cur
->values
[i
], objfile
, copied_types
);
2526 for (var
= internalvars
; var
; var
= var
->next
)
2527 preserve_one_internalvar (var
, objfile
, copied_types
);
2529 preserve_ext_lang_values (objfile
, copied_types
);
2531 htab_delete (copied_types
);
2535 show_convenience (char *ignore
, int from_tty
)
2537 struct gdbarch
*gdbarch
= get_current_arch ();
2538 struct internalvar
*var
;
2540 struct value_print_options opts
;
2542 get_user_print_options (&opts
);
2543 for (var
= internalvars
; var
; var
= var
->next
)
2545 volatile struct gdb_exception ex
;
2551 printf_filtered (("$%s = "), var
->name
);
2553 TRY_CATCH (ex
, RETURN_MASK_ERROR
)
2557 val
= value_of_internalvar (gdbarch
, var
);
2558 value_print (val
, gdb_stdout
, &opts
);
2561 fprintf_filtered (gdb_stdout
, _("<error: %s>"), ex
.message
);
2562 printf_filtered (("\n"));
2566 /* This text does not mention convenience functions on purpose.
2567 The user can't create them except via Python, and if Python support
2568 is installed this message will never be printed ($_streq will
2570 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2571 "Convenience variables have "
2572 "names starting with \"$\";\n"
2573 "use \"set\" as in \"set "
2574 "$foo = 5\" to define them.\n"));
2578 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2581 value_of_xmethod (struct xmethod_worker
*worker
)
2583 if (worker
->value
== NULL
)
2587 v
= allocate_value (builtin_type (target_gdbarch ())->xmethod
);
2588 v
->lval
= lval_xcallable
;
2589 v
->location
.xm_worker
= worker
;
2594 return worker
->value
;
2597 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2600 call_xmethod (struct value
*method
, int argc
, struct value
**argv
)
2602 gdb_assert (TYPE_CODE (value_type (method
)) == TYPE_CODE_XMETHOD
2603 && method
->lval
== lval_xcallable
&& argc
> 0);
2605 return invoke_xmethod (method
->location
.xm_worker
,
2606 argv
[0], argv
+ 1, argc
- 1);
2609 /* Extract a value as a C number (either long or double).
2610 Knows how to convert fixed values to double, or
2611 floating values to long.
2612 Does not deallocate the value. */
2615 value_as_long (struct value
*val
)
2617 /* This coerces arrays and functions, which is necessary (e.g.
2618 in disassemble_command). It also dereferences references, which
2619 I suspect is the most logical thing to do. */
2620 val
= coerce_array (val
);
2621 return unpack_long (value_type (val
), value_contents (val
));
2625 value_as_double (struct value
*val
)
2630 foo
= unpack_double (value_type (val
), value_contents (val
), &inv
);
2632 error (_("Invalid floating value found in program."));
2636 /* Extract a value as a C pointer. Does not deallocate the value.
2637 Note that val's type may not actually be a pointer; value_as_long
2638 handles all the cases. */
2640 value_as_address (struct value
*val
)
2642 struct gdbarch
*gdbarch
= get_type_arch (value_type (val
));
2644 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2645 whether we want this to be true eventually. */
2647 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2648 non-address (e.g. argument to "signal", "info break", etc.), or
2649 for pointers to char, in which the low bits *are* significant. */
2650 return gdbarch_addr_bits_remove (gdbarch
, value_as_long (val
));
2653 /* There are several targets (IA-64, PowerPC, and others) which
2654 don't represent pointers to functions as simply the address of
2655 the function's entry point. For example, on the IA-64, a
2656 function pointer points to a two-word descriptor, generated by
2657 the linker, which contains the function's entry point, and the
2658 value the IA-64 "global pointer" register should have --- to
2659 support position-independent code. The linker generates
2660 descriptors only for those functions whose addresses are taken.
2662 On such targets, it's difficult for GDB to convert an arbitrary
2663 function address into a function pointer; it has to either find
2664 an existing descriptor for that function, or call malloc and
2665 build its own. On some targets, it is impossible for GDB to
2666 build a descriptor at all: the descriptor must contain a jump
2667 instruction; data memory cannot be executed; and code memory
2670 Upon entry to this function, if VAL is a value of type `function'
2671 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2672 value_address (val) is the address of the function. This is what
2673 you'll get if you evaluate an expression like `main'. The call
2674 to COERCE_ARRAY below actually does all the usual unary
2675 conversions, which includes converting values of type `function'
2676 to `pointer to function'. This is the challenging conversion
2677 discussed above. Then, `unpack_long' will convert that pointer
2678 back into an address.
2680 So, suppose the user types `disassemble foo' on an architecture
2681 with a strange function pointer representation, on which GDB
2682 cannot build its own descriptors, and suppose further that `foo'
2683 has no linker-built descriptor. The address->pointer conversion
2684 will signal an error and prevent the command from running, even
2685 though the next step would have been to convert the pointer
2686 directly back into the same address.
2688 The following shortcut avoids this whole mess. If VAL is a
2689 function, just return its address directly. */
2690 if (TYPE_CODE (value_type (val
)) == TYPE_CODE_FUNC
2691 || TYPE_CODE (value_type (val
)) == TYPE_CODE_METHOD
)
2692 return value_address (val
);
2694 val
= coerce_array (val
);
2696 /* Some architectures (e.g. Harvard), map instruction and data
2697 addresses onto a single large unified address space. For
2698 instance: An architecture may consider a large integer in the
2699 range 0x10000000 .. 0x1000ffff to already represent a data
2700 addresses (hence not need a pointer to address conversion) while
2701 a small integer would still need to be converted integer to
2702 pointer to address. Just assume such architectures handle all
2703 integer conversions in a single function. */
2707 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2708 must admonish GDB hackers to make sure its behavior matches the
2709 compiler's, whenever possible.
2711 In general, I think GDB should evaluate expressions the same way
2712 the compiler does. When the user copies an expression out of
2713 their source code and hands it to a `print' command, they should
2714 get the same value the compiler would have computed. Any
2715 deviation from this rule can cause major confusion and annoyance,
2716 and needs to be justified carefully. In other words, GDB doesn't
2717 really have the freedom to do these conversions in clever and
2720 AndrewC pointed out that users aren't complaining about how GDB
2721 casts integers to pointers; they are complaining that they can't
2722 take an address from a disassembly listing and give it to `x/i'.
2723 This is certainly important.
2725 Adding an architecture method like integer_to_address() certainly
2726 makes it possible for GDB to "get it right" in all circumstances
2727 --- the target has complete control over how things get done, so
2728 people can Do The Right Thing for their target without breaking
2729 anyone else. The standard doesn't specify how integers get
2730 converted to pointers; usually, the ABI doesn't either, but
2731 ABI-specific code is a more reasonable place to handle it. */
2733 if (TYPE_CODE (value_type (val
)) != TYPE_CODE_PTR
2734 && TYPE_CODE (value_type (val
)) != TYPE_CODE_REF
2735 && gdbarch_integer_to_address_p (gdbarch
))
2736 return gdbarch_integer_to_address (gdbarch
, value_type (val
),
2737 value_contents (val
));
2739 return unpack_long (value_type (val
), value_contents (val
));
2743 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2744 as a long, or as a double, assuming the raw data is described
2745 by type TYPE. Knows how to convert different sizes of values
2746 and can convert between fixed and floating point. We don't assume
2747 any alignment for the raw data. Return value is in host byte order.
2749 If you want functions and arrays to be coerced to pointers, and
2750 references to be dereferenced, call value_as_long() instead.
2752 C++: It is assumed that the front-end has taken care of
2753 all matters concerning pointers to members. A pointer
2754 to member which reaches here is considered to be equivalent
2755 to an INT (or some size). After all, it is only an offset. */
2758 unpack_long (struct type
*type
, const gdb_byte
*valaddr
)
2760 enum bfd_endian byte_order
= gdbarch_byte_order (get_type_arch (type
));
2761 enum type_code code
= TYPE_CODE (type
);
2762 int len
= TYPE_LENGTH (type
);
2763 int nosign
= TYPE_UNSIGNED (type
);
2767 case TYPE_CODE_TYPEDEF
:
2768 return unpack_long (check_typedef (type
), valaddr
);
2769 case TYPE_CODE_ENUM
:
2770 case TYPE_CODE_FLAGS
:
2771 case TYPE_CODE_BOOL
:
2773 case TYPE_CODE_CHAR
:
2774 case TYPE_CODE_RANGE
:
2775 case TYPE_CODE_MEMBERPTR
:
2777 return extract_unsigned_integer (valaddr
, len
, byte_order
);
2779 return extract_signed_integer (valaddr
, len
, byte_order
);
2782 return extract_typed_floating (valaddr
, type
);
2784 case TYPE_CODE_DECFLOAT
:
2785 /* libdecnumber has a function to convert from decimal to integer, but
2786 it doesn't work when the decimal number has a fractional part. */
2787 return decimal_to_doublest (valaddr
, len
, byte_order
);
2791 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2792 whether we want this to be true eventually. */
2793 return extract_typed_address (valaddr
, type
);
2796 error (_("Value can't be converted to integer."));
2798 return 0; /* Placate lint. */
2801 /* Return a double value from the specified type and address.
2802 INVP points to an int which is set to 0 for valid value,
2803 1 for invalid value (bad float format). In either case,
2804 the returned double is OK to use. Argument is in target
2805 format, result is in host format. */
2808 unpack_double (struct type
*type
, const gdb_byte
*valaddr
, int *invp
)
2810 enum bfd_endian byte_order
= gdbarch_byte_order (get_type_arch (type
));
2811 enum type_code code
;
2815 *invp
= 0; /* Assume valid. */
2816 CHECK_TYPEDEF (type
);
2817 code
= TYPE_CODE (type
);
2818 len
= TYPE_LENGTH (type
);
2819 nosign
= TYPE_UNSIGNED (type
);
2820 if (code
== TYPE_CODE_FLT
)
2822 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2823 floating-point value was valid (using the macro
2824 INVALID_FLOAT). That test/macro have been removed.
2826 It turns out that only the VAX defined this macro and then
2827 only in a non-portable way. Fixing the portability problem
2828 wouldn't help since the VAX floating-point code is also badly
2829 bit-rotten. The target needs to add definitions for the
2830 methods gdbarch_float_format and gdbarch_double_format - these
2831 exactly describe the target floating-point format. The
2832 problem here is that the corresponding floatformat_vax_f and
2833 floatformat_vax_d values these methods should be set to are
2834 also not defined either. Oops!
2836 Hopefully someone will add both the missing floatformat
2837 definitions and the new cases for floatformat_is_valid (). */
2839 if (!floatformat_is_valid (floatformat_from_type (type
), valaddr
))
2845 return extract_typed_floating (valaddr
, type
);
2847 else if (code
== TYPE_CODE_DECFLOAT
)
2848 return decimal_to_doublest (valaddr
, len
, byte_order
);
2851 /* Unsigned -- be sure we compensate for signed LONGEST. */
2852 return (ULONGEST
) unpack_long (type
, valaddr
);
2856 /* Signed -- we are OK with unpack_long. */
2857 return unpack_long (type
, valaddr
);
2861 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2862 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2863 We don't assume any alignment for the raw data. Return value is in
2866 If you want functions and arrays to be coerced to pointers, and
2867 references to be dereferenced, call value_as_address() instead.
2869 C++: It is assumed that the front-end has taken care of
2870 all matters concerning pointers to members. A pointer
2871 to member which reaches here is considered to be equivalent
2872 to an INT (or some size). After all, it is only an offset. */
2875 unpack_pointer (struct type
*type
, const gdb_byte
*valaddr
)
2877 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2878 whether we want this to be true eventually. */
2879 return unpack_long (type
, valaddr
);
2883 /* Get the value of the FIELDNO'th field (which must be static) of
2887 value_static_field (struct type
*type
, int fieldno
)
2889 struct value
*retval
;
2891 switch (TYPE_FIELD_LOC_KIND (type
, fieldno
))
2893 case FIELD_LOC_KIND_PHYSADDR
:
2894 retval
= value_at_lazy (TYPE_FIELD_TYPE (type
, fieldno
),
2895 TYPE_FIELD_STATIC_PHYSADDR (type
, fieldno
));
2897 case FIELD_LOC_KIND_PHYSNAME
:
2899 const char *phys_name
= TYPE_FIELD_STATIC_PHYSNAME (type
, fieldno
);
2900 /* TYPE_FIELD_NAME (type, fieldno); */
2901 struct symbol
*sym
= lookup_symbol (phys_name
, 0, VAR_DOMAIN
, 0);
2905 /* With some compilers, e.g. HP aCC, static data members are
2906 reported as non-debuggable symbols. */
2907 struct bound_minimal_symbol msym
2908 = lookup_minimal_symbol (phys_name
, NULL
, NULL
);
2911 return allocate_optimized_out_value (type
);
2914 retval
= value_at_lazy (TYPE_FIELD_TYPE (type
, fieldno
),
2915 BMSYMBOL_VALUE_ADDRESS (msym
));
2919 retval
= value_of_variable (sym
, NULL
);
2923 gdb_assert_not_reached ("unexpected field location kind");
2929 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2930 You have to be careful here, since the size of the data area for the value
2931 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2932 than the old enclosing type, you have to allocate more space for the
2936 set_value_enclosing_type (struct value
*val
, struct type
*new_encl_type
)
2938 if (TYPE_LENGTH (new_encl_type
) > TYPE_LENGTH (value_enclosing_type (val
)))
2940 (gdb_byte
*) xrealloc (val
->contents
, TYPE_LENGTH (new_encl_type
));
2942 val
->enclosing_type
= new_encl_type
;
2945 /* Given a value ARG1 (offset by OFFSET bytes)
2946 of a struct or union type ARG_TYPE,
2947 extract and return the value of one of its (non-static) fields.
2948 FIELDNO says which field. */
2951 value_primitive_field (struct value
*arg1
, int offset
,
2952 int fieldno
, struct type
*arg_type
)
2957 CHECK_TYPEDEF (arg_type
);
2958 type
= TYPE_FIELD_TYPE (arg_type
, fieldno
);
2960 /* Call check_typedef on our type to make sure that, if TYPE
2961 is a TYPE_CODE_TYPEDEF, its length is set to the length
2962 of the target type instead of zero. However, we do not
2963 replace the typedef type by the target type, because we want
2964 to keep the typedef in order to be able to print the type
2965 description correctly. */
2966 check_typedef (type
);
2968 if (TYPE_FIELD_BITSIZE (arg_type
, fieldno
))
2970 /* Handle packed fields.
2972 Create a new value for the bitfield, with bitpos and bitsize
2973 set. If possible, arrange offset and bitpos so that we can
2974 do a single aligned read of the size of the containing type.
2975 Otherwise, adjust offset to the byte containing the first
2976 bit. Assume that the address, offset, and embedded offset
2977 are sufficiently aligned. */
2979 int bitpos
= TYPE_FIELD_BITPOS (arg_type
, fieldno
);
2980 int container_bitsize
= TYPE_LENGTH (type
) * 8;
2982 v
= allocate_value_lazy (type
);
2983 v
->bitsize
= TYPE_FIELD_BITSIZE (arg_type
, fieldno
);
2984 if ((bitpos
% container_bitsize
) + v
->bitsize
<= container_bitsize
2985 && TYPE_LENGTH (type
) <= (int) sizeof (LONGEST
))
2986 v
->bitpos
= bitpos
% container_bitsize
;
2988 v
->bitpos
= bitpos
% 8;
2989 v
->offset
= (value_embedded_offset (arg1
)
2991 + (bitpos
- v
->bitpos
) / 8);
2992 set_value_parent (v
, arg1
);
2993 if (!value_lazy (arg1
))
2994 value_fetch_lazy (v
);
2996 else if (fieldno
< TYPE_N_BASECLASSES (arg_type
))
2998 /* This field is actually a base subobject, so preserve the
2999 entire object's contents for later references to virtual
3003 /* Lazy register values with offsets are not supported. */
3004 if (VALUE_LVAL (arg1
) == lval_register
&& value_lazy (arg1
))
3005 value_fetch_lazy (arg1
);
3007 /* We special case virtual inheritance here because this
3008 requires access to the contents, which we would rather avoid
3009 for references to ordinary fields of unavailable values. */
3010 if (BASETYPE_VIA_VIRTUAL (arg_type
, fieldno
))
3011 boffset
= baseclass_offset (arg_type
, fieldno
,
3012 value_contents (arg1
),
3013 value_embedded_offset (arg1
),
3014 value_address (arg1
),
3017 boffset
= TYPE_FIELD_BITPOS (arg_type
, fieldno
) / 8;
3019 if (value_lazy (arg1
))
3020 v
= allocate_value_lazy (value_enclosing_type (arg1
));
3023 v
= allocate_value (value_enclosing_type (arg1
));
3024 value_contents_copy_raw (v
, 0, arg1
, 0,
3025 TYPE_LENGTH (value_enclosing_type (arg1
)));
3028 v
->offset
= value_offset (arg1
);
3029 v
->embedded_offset
= offset
+ value_embedded_offset (arg1
) + boffset
;
3033 /* Plain old data member */
3034 offset
+= TYPE_FIELD_BITPOS (arg_type
, fieldno
) / 8;
3036 /* Lazy register values with offsets are not supported. */
3037 if (VALUE_LVAL (arg1
) == lval_register
&& value_lazy (arg1
))
3038 value_fetch_lazy (arg1
);
3040 if (value_lazy (arg1
))
3041 v
= allocate_value_lazy (type
);
3044 v
= allocate_value (type
);
3045 value_contents_copy_raw (v
, value_embedded_offset (v
),
3046 arg1
, value_embedded_offset (arg1
) + offset
,
3047 TYPE_LENGTH (type
));
3049 v
->offset
= (value_offset (arg1
) + offset
3050 + value_embedded_offset (arg1
));
3052 set_value_component_location (v
, arg1
);
3053 VALUE_REGNUM (v
) = VALUE_REGNUM (arg1
);
3054 VALUE_FRAME_ID (v
) = VALUE_FRAME_ID (arg1
);
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
3068 /* Return a non-virtual function as a value.
3069 F is the list of member functions which contains the desired method.
3070 J is an index into F which provides the desired method.
3072 We only use the symbol for its address, so be happy with either a
3073 full symbol or a minimal symbol. */
3076 value_fn_field (struct value
**arg1p
, struct fn_field
*f
,
3077 int j
, struct type
*type
,
3081 struct type
*ftype
= TYPE_FN_FIELD_TYPE (f
, j
);
3082 const char *physname
= TYPE_FN_FIELD_PHYSNAME (f
, j
);
3084 struct bound_minimal_symbol msym
;
3086 sym
= lookup_symbol (physname
, 0, VAR_DOMAIN
, 0);
3089 memset (&msym
, 0, sizeof (msym
));
3093 gdb_assert (sym
== NULL
);
3094 msym
= lookup_bound_minimal_symbol (physname
);
3095 if (msym
.minsym
== NULL
)
3099 v
= allocate_value (ftype
);
3102 set_value_address (v
, BLOCK_START (SYMBOL_BLOCK_VALUE (sym
)));
3106 /* The minimal symbol might point to a function descriptor;
3107 resolve it to the actual code address instead. */
3108 struct objfile
*objfile
= msym
.objfile
;
3109 struct gdbarch
*gdbarch
= get_objfile_arch (objfile
);
3111 set_value_address (v
,
3112 gdbarch_convert_from_func_ptr_addr
3113 (gdbarch
, BMSYMBOL_VALUE_ADDRESS (msym
), ¤t_target
));
3118 if (type
!= value_type (*arg1p
))
3119 *arg1p
= value_ind (value_cast (lookup_pointer_type (type
),
3120 value_addr (*arg1p
)));
3122 /* Move the `this' pointer according to the offset.
3123 VALUE_OFFSET (*arg1p) += offset; */
3131 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3132 VALADDR, and store the result in *RESULT.
3133 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3135 Extracting bits depends on endianness of the machine. Compute the
3136 number of least significant bits to discard. For big endian machines,
3137 we compute the total number of bits in the anonymous object, subtract
3138 off the bit count from the MSB of the object to the MSB of the
3139 bitfield, then the size of the bitfield, which leaves the LSB discard
3140 count. For little endian machines, the discard count is simply the
3141 number of bits from the LSB of the anonymous object to the LSB of the
3144 If the field is signed, we also do sign extension. */
3147 unpack_bits_as_long (struct type
*field_type
, const gdb_byte
*valaddr
,
3148 int bitpos
, int bitsize
)
3150 enum bfd_endian byte_order
= gdbarch_byte_order (get_type_arch (field_type
));
3157 /* Read the minimum number of bytes required; there may not be
3158 enough bytes to read an entire ULONGEST. */
3159 CHECK_TYPEDEF (field_type
);
3161 bytes_read
= ((bitpos
% 8) + bitsize
+ 7) / 8;
3163 bytes_read
= TYPE_LENGTH (field_type
);
3165 read_offset
= bitpos
/ 8;
3167 val
= extract_unsigned_integer (valaddr
+ read_offset
,
3168 bytes_read
, byte_order
);
3170 /* Extract bits. See comment above. */
3172 if (gdbarch_bits_big_endian (get_type_arch (field_type
)))
3173 lsbcount
= (bytes_read
* 8 - bitpos
% 8 - bitsize
);
3175 lsbcount
= (bitpos
% 8);
3178 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3179 If the field is signed, and is negative, then sign extend. */
3181 if ((bitsize
> 0) && (bitsize
< 8 * (int) sizeof (val
)))
3183 valmask
= (((ULONGEST
) 1) << bitsize
) - 1;
3185 if (!TYPE_UNSIGNED (field_type
))
3187 if (val
& (valmask
^ (valmask
>> 1)))
3197 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3198 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3199 ORIGINAL_VALUE, which must not be NULL. See
3200 unpack_value_bits_as_long for more details. */
3203 unpack_value_field_as_long (struct type
*type
, const gdb_byte
*valaddr
,
3204 int embedded_offset
, int fieldno
,
3205 const struct value
*val
, LONGEST
*result
)
3207 int bitpos
= TYPE_FIELD_BITPOS (type
, fieldno
);
3208 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3209 struct type
*field_type
= TYPE_FIELD_TYPE (type
, fieldno
);
3212 gdb_assert (val
!= NULL
);
3214 bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3215 if (value_bits_any_optimized_out (val
, bit_offset
, bitsize
)
3216 || !value_bits_available (val
, bit_offset
, bitsize
))
3219 *result
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3224 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3225 object at VALADDR. See unpack_bits_as_long for more details. */
3228 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3230 int bitpos
= TYPE_FIELD_BITPOS (type
, fieldno
);
3231 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3232 struct type
*field_type
= TYPE_FIELD_TYPE (type
, fieldno
);
3234 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
3237 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3238 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3239 the contents in DEST_VAL, zero or sign extending if the type of
3240 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3241 VAL. If the VAL's contents required to extract the bitfield from
3242 are unavailable/optimized out, DEST_VAL is correspondingly
3243 marked unavailable/optimized out. */
3246 unpack_value_bitfield (struct value
*dest_val
,
3247 int bitpos
, int bitsize
,
3248 const gdb_byte
*valaddr
, int embedded_offset
,
3249 const struct value
*val
)
3251 enum bfd_endian byte_order
;
3255 struct type
*field_type
= value_type (dest_val
);
3257 /* First, unpack and sign extend the bitfield as if it was wholly
3258 available. Invalid/unavailable bits are read as zero, but that's
3259 OK, as they'll end up marked below. */
3260 byte_order
= gdbarch_byte_order (get_type_arch (field_type
));
3261 num
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3263 store_signed_integer (value_contents_raw (dest_val
),
3264 TYPE_LENGTH (field_type
), byte_order
, num
);
3266 /* Now copy the optimized out / unavailability ranges to the right
3268 src_bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3269 if (byte_order
== BFD_ENDIAN_BIG
)
3270 dst_bit_offset
= TYPE_LENGTH (field_type
) * TARGET_CHAR_BIT
- bitsize
;
3273 value_ranges_copy_adjusted (dest_val
, dst_bit_offset
,
3274 val
, src_bit_offset
, bitsize
);
3277 /* Return a new value with type TYPE, which is FIELDNO field of the
3278 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3279 of VAL. If the VAL's contents required to extract the bitfield
3280 from are unavailable/optimized out, the new value is
3281 correspondingly marked unavailable/optimized out. */
3284 value_field_bitfield (struct type
*type
, int fieldno
,
3285 const gdb_byte
*valaddr
,
3286 int embedded_offset
, const struct value
*val
)
3288 int bitpos
= TYPE_FIELD_BITPOS (type
, fieldno
);
3289 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3290 struct value
*res_val
= allocate_value (TYPE_FIELD_TYPE (type
, fieldno
));
3292 unpack_value_bitfield (res_val
, bitpos
, bitsize
,
3293 valaddr
, embedded_offset
, val
);
3298 /* Modify the value of a bitfield. ADDR points to a block of memory in
3299 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3300 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3301 indicate which bits (in target bit order) comprise the bitfield.
3302 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3303 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3306 modify_field (struct type
*type
, gdb_byte
*addr
,
3307 LONGEST fieldval
, int bitpos
, int bitsize
)
3309 enum bfd_endian byte_order
= gdbarch_byte_order (get_type_arch (type
));
3311 ULONGEST mask
= (ULONGEST
) -1 >> (8 * sizeof (ULONGEST
) - bitsize
);
3314 /* Normalize BITPOS. */
3318 /* If a negative fieldval fits in the field in question, chop
3319 off the sign extension bits. */
3320 if ((~fieldval
& ~(mask
>> 1)) == 0)
3323 /* Warn if value is too big to fit in the field in question. */
3324 if (0 != (fieldval
& ~mask
))
3326 /* FIXME: would like to include fieldval in the message, but
3327 we don't have a sprintf_longest. */
3328 warning (_("Value does not fit in %d bits."), bitsize
);
3330 /* Truncate it, otherwise adjoining fields may be corrupted. */
3334 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3335 false valgrind reports. */
3337 bytesize
= (bitpos
+ bitsize
+ 7) / 8;
3338 oword
= extract_unsigned_integer (addr
, bytesize
, byte_order
);
3340 /* Shifting for bit field depends on endianness of the target machine. */
3341 if (gdbarch_bits_big_endian (get_type_arch (type
)))
3342 bitpos
= bytesize
* 8 - bitpos
- bitsize
;
3344 oword
&= ~(mask
<< bitpos
);
3345 oword
|= fieldval
<< bitpos
;
3347 store_unsigned_integer (addr
, bytesize
, byte_order
, oword
);
3350 /* Pack NUM into BUF using a target format of TYPE. */
3353 pack_long (gdb_byte
*buf
, struct type
*type
, LONGEST num
)
3355 enum bfd_endian byte_order
= gdbarch_byte_order (get_type_arch (type
));
3358 type
= check_typedef (type
);
3359 len
= TYPE_LENGTH (type
);
3361 switch (TYPE_CODE (type
))
3364 case TYPE_CODE_CHAR
:
3365 case TYPE_CODE_ENUM
:
3366 case TYPE_CODE_FLAGS
:
3367 case TYPE_CODE_BOOL
:
3368 case TYPE_CODE_RANGE
:
3369 case TYPE_CODE_MEMBERPTR
:
3370 store_signed_integer (buf
, len
, byte_order
, num
);
3375 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3379 error (_("Unexpected type (%d) encountered for integer constant."),
3385 /* Pack NUM into BUF using a target format of TYPE. */
3388 pack_unsigned_long (gdb_byte
*buf
, struct type
*type
, ULONGEST num
)
3391 enum bfd_endian byte_order
;
3393 type
= check_typedef (type
);
3394 len
= TYPE_LENGTH (type
);
3395 byte_order
= gdbarch_byte_order (get_type_arch (type
));
3397 switch (TYPE_CODE (type
))
3400 case TYPE_CODE_CHAR
:
3401 case TYPE_CODE_ENUM
:
3402 case TYPE_CODE_FLAGS
:
3403 case TYPE_CODE_BOOL
:
3404 case TYPE_CODE_RANGE
:
3405 case TYPE_CODE_MEMBERPTR
:
3406 store_unsigned_integer (buf
, len
, byte_order
, num
);
3411 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3415 error (_("Unexpected type (%d) encountered "
3416 "for unsigned integer constant."),
3422 /* Convert C numbers into newly allocated values. */
3425 value_from_longest (struct type
*type
, LONGEST num
)
3427 struct value
*val
= allocate_value (type
);
3429 pack_long (value_contents_raw (val
), type
, num
);
3434 /* Convert C unsigned numbers into newly allocated values. */
3437 value_from_ulongest (struct type
*type
, ULONGEST num
)
3439 struct value
*val
= allocate_value (type
);
3441 pack_unsigned_long (value_contents_raw (val
), type
, num
);
3447 /* Create a value representing a pointer of type TYPE to the address
3451 value_from_pointer (struct type
*type
, CORE_ADDR addr
)
3453 struct value
*val
= allocate_value (type
);
3455 store_typed_address (value_contents_raw (val
),
3456 check_typedef (type
), addr
);
3461 /* Create a value of type TYPE whose contents come from VALADDR, if it
3462 is non-null, and whose memory address (in the inferior) is
3463 ADDRESS. The type of the created value may differ from the passed
3464 type TYPE. Make sure to retrieve values new type after this call.
3465 Note that TYPE is not passed through resolve_dynamic_type; this is
3466 a special API intended for use only by Ada. */
3469 value_from_contents_and_address_unresolved (struct type
*type
,
3470 const gdb_byte
*valaddr
,
3475 if (valaddr
== NULL
)
3476 v
= allocate_value_lazy (type
);
3478 v
= value_from_contents (type
, valaddr
);
3479 set_value_address (v
, address
);
3480 VALUE_LVAL (v
) = lval_memory
;
3484 /* Create a value of type TYPE whose contents come from VALADDR, if it
3485 is non-null, and whose memory address (in the inferior) is
3486 ADDRESS. The type of the created value may differ from the passed
3487 type TYPE. Make sure to retrieve values new type after this call. */
3490 value_from_contents_and_address (struct type
*type
,
3491 const gdb_byte
*valaddr
,
3494 struct type
*resolved_type
= resolve_dynamic_type (type
, address
);
3495 struct type
*resolved_type_no_typedef
= check_typedef (resolved_type
);
3498 if (valaddr
== NULL
)
3499 v
= allocate_value_lazy (resolved_type
);
3501 v
= value_from_contents (resolved_type
, valaddr
);
3502 if (TYPE_DATA_LOCATION (resolved_type_no_typedef
) != NULL
3503 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef
) == PROP_CONST
)
3504 address
= TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef
);
3505 set_value_address (v
, address
);
3506 VALUE_LVAL (v
) = lval_memory
;
3510 /* Create a value of type TYPE holding the contents CONTENTS.
3511 The new value is `not_lval'. */
3514 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3516 struct value
*result
;
3518 result
= allocate_value (type
);
3519 memcpy (value_contents_raw (result
), contents
, TYPE_LENGTH (type
));
3524 value_from_double (struct type
*type
, DOUBLEST num
)
3526 struct value
*val
= allocate_value (type
);
3527 struct type
*base_type
= check_typedef (type
);
3528 enum type_code code
= TYPE_CODE (base_type
);
3530 if (code
== TYPE_CODE_FLT
)
3532 store_typed_floating (value_contents_raw (val
), base_type
, num
);
3535 error (_("Unexpected type encountered for floating constant."));
3541 value_from_decfloat (struct type
*type
, const gdb_byte
*dec
)
3543 struct value
*val
= allocate_value (type
);
3545 memcpy (value_contents_raw (val
), dec
, TYPE_LENGTH (type
));
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3613 coerce_ref_if_computed (const struct value
*arg
)
3615 const struct lval_funcs
*funcs
;
3617 if (TYPE_CODE (check_typedef (value_type (arg
))) != TYPE_CODE_REF
)
3620 if (value_lval_const (arg
) != lval_computed
)
3623 funcs
= value_computed_funcs (arg
);
3624 if (funcs
->coerce_ref
== NULL
)
3627 return funcs
->coerce_ref (arg
);
3630 /* Look at value.h for description. */
3633 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3634 struct type
*original_type
,
3635 struct value
*original_value
)
3637 /* Re-adjust type. */
3638 deprecated_set_value_type (value
, TYPE_TARGET_TYPE (original_type
));
3640 /* Add embedding info. */
3641 set_value_enclosing_type (value
, enc_type
);
3642 set_value_embedded_offset (value
, value_pointed_to_offset (original_value
));
3644 /* We may be pointing to an object of some derived type. */
3645 return value_full_object (value
, NULL
, 0, 0, 0);
3649 coerce_ref (struct value
*arg
)
3651 struct type
*value_type_arg_tmp
= check_typedef (value_type (arg
));
3652 struct value
*retval
;
3653 struct type
*enc_type
;
3655 retval
= coerce_ref_if_computed (arg
);
3659 if (TYPE_CODE (value_type_arg_tmp
) != TYPE_CODE_REF
)
3662 enc_type
= check_typedef (value_enclosing_type (arg
));
3663 enc_type
= TYPE_TARGET_TYPE (enc_type
);
3665 retval
= value_at_lazy (enc_type
,
3666 unpack_pointer (value_type (arg
),
3667 value_contents (arg
)));
3668 enc_type
= value_type (retval
);
3669 return readjust_indirect_value_type (retval
, enc_type
,
3670 value_type_arg_tmp
, arg
);
3674 coerce_array (struct value
*arg
)
3678 arg
= coerce_ref (arg
);
3679 type
= check_typedef (value_type (arg
));
3681 switch (TYPE_CODE (type
))
3683 case TYPE_CODE_ARRAY
:
3684 if (!TYPE_VECTOR (type
) && current_language
->c_style_arrays
)
3685 arg
= value_coerce_array (arg
);
3687 case TYPE_CODE_FUNC
:
3688 arg
= value_coerce_function (arg
);
3695 /* Return the return value convention that will be used for the
3698 enum return_value_convention
3699 struct_return_convention (struct gdbarch
*gdbarch
,
3700 struct value
*function
, struct type
*value_type
)
3702 enum type_code code
= TYPE_CODE (value_type
);
3704 if (code
== TYPE_CODE_ERROR
)
3705 error (_("Function return type unknown."));
3707 /* Probe the architecture for the return-value convention. */
3708 return gdbarch_return_value (gdbarch
, function
, value_type
,
3712 /* Return true if the function returning the specified type is using
3713 the convention of returning structures in memory (passing in the
3714 address as a hidden first parameter). */
3717 using_struct_return (struct gdbarch
*gdbarch
,
3718 struct value
*function
, struct type
*value_type
)
3720 if (TYPE_CODE (value_type
) == TYPE_CODE_VOID
)
3721 /* A void return value is never in memory. See also corresponding
3722 code in "print_return_value". */
3725 return (struct_return_convention (gdbarch
, function
, value_type
)
3726 != RETURN_VALUE_REGISTER_CONVENTION
);
3729 /* Set the initialized field in a value struct. */
3732 set_value_initialized (struct value
*val
, int status
)
3734 val
->initialized
= status
;
3737 /* Return the initialized field in a value struct. */
3740 value_initialized (struct value
*val
)
3742 return val
->initialized
;
3745 /* Called only from the value_contents and value_contents_all()
3746 macros, if the current data for a variable needs to be loaded into
3747 value_contents(VAL). Fetches the data from the user's process, and
3748 clears the lazy flag to indicate that the data in the buffer is
3751 If the value is zero-length, we avoid calling read_memory, which
3752 would abort. We mark the value as fetched anyway -- all 0 bytes of
3755 This function returns a value because it is used in the
3756 value_contents macro as part of an expression, where a void would
3757 not work. The value is ignored. */
3760 value_fetch_lazy (struct value
*val
)
3762 gdb_assert (value_lazy (val
));
3763 allocate_value_contents (val
);
3764 /* A value is either lazy, or fully fetched. The
3765 availability/validity is only established as we try to fetch a
3767 gdb_assert (VEC_empty (range_s
, val
->optimized_out
));
3768 gdb_assert (VEC_empty (range_s
, val
->unavailable
));
3769 if (value_bitsize (val
))
3771 /* To read a lazy bitfield, read the entire enclosing value. This
3772 prevents reading the same block of (possibly volatile) memory once
3773 per bitfield. It would be even better to read only the containing
3774 word, but we have no way to record that just specific bits of a
3775 value have been fetched. */
3776 struct type
*type
= check_typedef (value_type (val
));
3777 struct value
*parent
= value_parent (val
);
3779 if (value_lazy (parent
))
3780 value_fetch_lazy (parent
);
3782 unpack_value_bitfield (val
,
3783 value_bitpos (val
), value_bitsize (val
),
3784 value_contents_for_printing (parent
),
3785 value_offset (val
), parent
);
3787 else if (VALUE_LVAL (val
) == lval_memory
)
3789 CORE_ADDR addr
= value_address (val
);
3790 struct type
*type
= check_typedef (value_enclosing_type (val
));
3792 if (TYPE_LENGTH (type
))
3793 read_value_memory (val
, 0, value_stack (val
),
3794 addr
, value_contents_all_raw (val
),
3795 TYPE_LENGTH (type
));
3797 else if (VALUE_LVAL (val
) == lval_register
)
3799 struct frame_info
*frame
;
3801 struct type
*type
= check_typedef (value_type (val
));
3802 struct value
*new_val
= val
, *mark
= value_mark ();
3804 /* Offsets are not supported here; lazy register values must
3805 refer to the entire register. */
3806 gdb_assert (value_offset (val
) == 0);
3808 while (VALUE_LVAL (new_val
) == lval_register
&& value_lazy (new_val
))
3810 struct frame_id frame_id
= VALUE_FRAME_ID (new_val
);
3812 frame
= frame_find_by_id (frame_id
);
3813 regnum
= VALUE_REGNUM (new_val
);
3815 gdb_assert (frame
!= NULL
);
3817 /* Convertible register routines are used for multi-register
3818 values and for interpretation in different types
3819 (e.g. float or int from a double register). Lazy
3820 register values should have the register's natural type,
3821 so they do not apply. */
3822 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame
),
3825 new_val
= get_frame_register_value (frame
, regnum
);
3827 /* If we get another lazy lval_register value, it means the
3828 register is found by reading it from the next frame.
3829 get_frame_register_value should never return a value with
3830 the frame id pointing to FRAME. If it does, it means we
3831 either have two consecutive frames with the same frame id
3832 in the frame chain, or some code is trying to unwind
3833 behind get_prev_frame's back (e.g., a frame unwind
3834 sniffer trying to unwind), bypassing its validations. In
3835 any case, it should always be an internal error to end up
3836 in this situation. */
3837 if (VALUE_LVAL (new_val
) == lval_register
3838 && value_lazy (new_val
)
3839 && frame_id_eq (VALUE_FRAME_ID (new_val
), frame_id
))
3840 internal_error (__FILE__
, __LINE__
,
3841 _("infinite loop while fetching a register"));
3844 /* If it's still lazy (for instance, a saved register on the
3845 stack), fetch it. */
3846 if (value_lazy (new_val
))
3847 value_fetch_lazy (new_val
);
3849 /* Copy the contents and the unavailability/optimized-out
3850 meta-data from NEW_VAL to VAL. */
3851 set_value_lazy (val
, 0);
3852 value_contents_copy (val
, value_embedded_offset (val
),
3853 new_val
, value_embedded_offset (new_val
),
3854 TYPE_LENGTH (type
));
3858 struct gdbarch
*gdbarch
;
3859 frame
= frame_find_by_id (VALUE_FRAME_ID (val
));
3860 regnum
= VALUE_REGNUM (val
);
3861 gdbarch
= get_frame_arch (frame
);
3863 fprintf_unfiltered (gdb_stdlog
,
3864 "{ value_fetch_lazy "
3865 "(frame=%d,regnum=%d(%s),...) ",
3866 frame_relative_level (frame
), regnum
,
3867 user_reg_map_regnum_to_name (gdbarch
, regnum
));
3869 fprintf_unfiltered (gdb_stdlog
, "->");
3870 if (value_optimized_out (new_val
))
3872 fprintf_unfiltered (gdb_stdlog
, " ");
3873 val_print_optimized_out (new_val
, gdb_stdlog
);
3878 const gdb_byte
*buf
= value_contents (new_val
);
3880 if (VALUE_LVAL (new_val
) == lval_register
)
3881 fprintf_unfiltered (gdb_stdlog
, " register=%d",
3882 VALUE_REGNUM (new_val
));
3883 else if (VALUE_LVAL (new_val
) == lval_memory
)
3884 fprintf_unfiltered (gdb_stdlog
, " address=%s",
3886 value_address (new_val
)));
3888 fprintf_unfiltered (gdb_stdlog
, " computed");
3890 fprintf_unfiltered (gdb_stdlog
, " bytes=");
3891 fprintf_unfiltered (gdb_stdlog
, "[");
3892 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
3893 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3894 fprintf_unfiltered (gdb_stdlog
, "]");
3897 fprintf_unfiltered (gdb_stdlog
, " }\n");
3900 /* Dispose of the intermediate values. This prevents
3901 watchpoints from trying to watch the saved frame pointer. */
3902 value_free_to_mark (mark
);
3904 else if (VALUE_LVAL (val
) == lval_computed
3905 && value_computed_funcs (val
)->read
!= NULL
)
3906 value_computed_funcs (val
)->read (val
);
3908 internal_error (__FILE__
, __LINE__
, _("Unexpected lazy value type."));
3910 set_value_lazy (val
, 0);
3914 /* Implementation of the convenience function $_isvoid. */
3916 static struct value
*
3917 isvoid_internal_fn (struct gdbarch
*gdbarch
,
3918 const struct language_defn
*language
,
3919 void *cookie
, int argc
, struct value
**argv
)
3924 error (_("You must provide one argument for $_isvoid."));
3926 ret
= TYPE_CODE (value_type (argv
[0])) == TYPE_CODE_VOID
;
3928 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
3932 _initialize_values (void)
3934 add_cmd ("convenience", no_class
, show_convenience
, _("\
3935 Debugger convenience (\"$foo\") variables and functions.\n\
3936 Convenience variables are created when you assign them values;\n\
3937 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3939 A few convenience variables are given values automatically:\n\
3940 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3941 \"$__\" holds the contents of the last address examined with \"x\"."
3944 Convenience functions are defined via the Python API."
3947 add_alias_cmd ("conv", "convenience", no_class
, 1, &showlist
);
3949 add_cmd ("values", no_set_class
, show_values
, _("\
3950 Elements of value history around item number IDX (or last ten)."),
3953 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
3954 Initialize a convenience variable if necessary.\n\
3955 init-if-undefined VARIABLE = EXPRESSION\n\
3956 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3957 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3958 VARIABLE is already initialized."));
3960 add_prefix_cmd ("function", no_class
, function_command
, _("\
3961 Placeholder command for showing help on convenience functions."),
3962 &functionlist
, "function ", 0, &cmdlist
);
3964 add_internal_function ("_isvoid", _("\
3965 Check whether an expression is void.\n\
3966 Usage: $_isvoid (expression)\n\
3967 Return 1 if the expression is void, zero otherwise."),
3968 isvoid_internal_fn
, NULL
);