6116b681d39a7812ab324b419ee386c8ce2fdec5
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44
45 /* Definition of a user function. */
46 struct internal_function
47 {
48 /* The name of the function. It is a bit odd to have this in the
49 function itself -- the user might use a differently-named
50 convenience variable to hold the function. */
51 char *name;
52
53 /* The handler. */
54 internal_function_fn handler;
55
56 /* User data for the handler. */
57 void *cookie;
58 };
59
60 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
61
62 struct range
63 {
64 /* Lowest offset in the range. */
65 LONGEST offset;
66
67 /* Length of the range. */
68 LONGEST length;
69 };
70
71 typedef struct range range_s;
72
73 DEF_VEC_O(range_s);
74
75 /* Returns true if the ranges defined by [offset1, offset1+len1) and
76 [offset2, offset2+len2) overlap. */
77
78 static int
79 ranges_overlap (LONGEST offset1, LONGEST len1,
80 LONGEST offset2, LONGEST len2)
81 {
82 ULONGEST h, l;
83
84 l = std::max (offset1, offset2);
85 h = std::min (offset1 + len1, offset2 + len2);
86 return (l < h);
87 }
88
89 /* Returns true if the first argument is strictly less than the
90 second, useful for VEC_lower_bound. We keep ranges sorted by
91 offset and coalesce overlapping and contiguous ranges, so this just
92 compares the starting offset. */
93
94 static int
95 range_lessthan (const range_s *r1, const range_s *r2)
96 {
97 return r1->offset < r2->offset;
98 }
99
100 /* Returns true if RANGES contains any range that overlaps [OFFSET,
101 OFFSET+LENGTH). */
102
103 static int
104 ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
105 {
106 range_s what;
107 LONGEST i;
108
109 what.offset = offset;
110 what.length = length;
111
112 /* We keep ranges sorted by offset and coalesce overlapping and
113 contiguous ranges, so to check if a range list contains a given
114 range, we can do a binary search for the position the given range
115 would be inserted if we only considered the starting OFFSET of
116 ranges. We call that position I. Since we also have LENGTH to
117 care for (this is a range afterall), we need to check if the
118 _previous_ range overlaps the I range. E.g.,
119
120 R
121 |---|
122 |---| |---| |------| ... |--|
123 0 1 2 N
124
125 I=1
126
127 In the case above, the binary search would return `I=1', meaning,
128 this OFFSET should be inserted at position 1, and the current
129 position 1 should be pushed further (and before 2). But, `0'
130 overlaps with R.
131
132 Then we need to check if the I range overlaps the I range itself.
133 E.g.,
134
135 R
136 |---|
137 |---| |---| |-------| ... |--|
138 0 1 2 N
139
140 I=1
141 */
142
143 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
144
145 if (i > 0)
146 {
147 struct range *bef = VEC_index (range_s, ranges, i - 1);
148
149 if (ranges_overlap (bef->offset, bef->length, offset, length))
150 return 1;
151 }
152
153 if (i < VEC_length (range_s, ranges))
154 {
155 struct range *r = VEC_index (range_s, ranges, i);
156
157 if (ranges_overlap (r->offset, r->length, offset, length))
158 return 1;
159 }
160
161 return 0;
162 }
163
164 static struct cmd_list_element *functionlist;
165
166 /* Note that the fields in this structure are arranged to save a bit
167 of memory. */
168
169 struct value
170 {
171 explicit value (struct type *type_)
172 : modifiable (1),
173 lazy (1),
174 initialized (1),
175 stack (0),
176 type (type_),
177 enclosing_type (type_)
178 {
179 location.address = 0;
180 }
181
182 ~value ()
183 {
184 if (VALUE_LVAL (this) == lval_computed)
185 {
186 const struct lval_funcs *funcs = location.computed.funcs;
187
188 if (funcs->free_closure)
189 funcs->free_closure (this);
190 }
191 else if (VALUE_LVAL (this) == lval_xcallable)
192 delete location.xm_worker;
193
194 xfree (contents);
195 VEC_free (range_s, unavailable);
196 VEC_free (range_s, optimized_out);
197 }
198
199 DISABLE_COPY_AND_ASSIGN (value);
200
201 /* Type of value; either not an lval, or one of the various
202 different possible kinds of lval. */
203 enum lval_type lval = not_lval;
204
205 /* Is it modifiable? Only relevant if lval != not_lval. */
206 unsigned int modifiable : 1;
207
208 /* If zero, contents of this value are in the contents field. If
209 nonzero, contents are in inferior. If the lval field is lval_memory,
210 the contents are in inferior memory at location.address plus offset.
211 The lval field may also be lval_register.
212
213 WARNING: This field is used by the code which handles watchpoints
214 (see breakpoint.c) to decide whether a particular value can be
215 watched by hardware watchpoints. If the lazy flag is set for
216 some member of a value chain, it is assumed that this member of
217 the chain doesn't need to be watched as part of watching the
218 value itself. This is how GDB avoids watching the entire struct
219 or array when the user wants to watch a single struct member or
220 array element. If you ever change the way lazy flag is set and
221 reset, be sure to consider this use as well! */
222 unsigned int lazy : 1;
223
224 /* If value is a variable, is it initialized or not. */
225 unsigned int initialized : 1;
226
227 /* If value is from the stack. If this is set, read_stack will be
228 used instead of read_memory to enable extra caching. */
229 unsigned int stack : 1;
230
231 /* Location of value (if lval). */
232 union
233 {
234 /* If lval == lval_memory, this is the address in the inferior */
235 CORE_ADDR address;
236
237 /*If lval == lval_register, the value is from a register. */
238 struct
239 {
240 /* Register number. */
241 int regnum;
242 /* Frame ID of "next" frame to which a register value is relative.
243 If the register value is found relative to frame F, then the
244 frame id of F->next will be stored in next_frame_id. */
245 struct frame_id next_frame_id;
246 } reg;
247
248 /* Pointer to internal variable. */
249 struct internalvar *internalvar;
250
251 /* Pointer to xmethod worker. */
252 struct xmethod_worker *xm_worker;
253
254 /* If lval == lval_computed, this is a set of function pointers
255 to use to access and describe the value, and a closure pointer
256 for them to use. */
257 struct
258 {
259 /* Functions to call. */
260 const struct lval_funcs *funcs;
261
262 /* Closure for those functions to use. */
263 void *closure;
264 } computed;
265 } location;
266
267 /* Describes offset of a value within lval of a structure in target
268 addressable memory units. Note also the member embedded_offset
269 below. */
270 LONGEST offset = 0;
271
272 /* Only used for bitfields; number of bits contained in them. */
273 LONGEST bitsize = 0;
274
275 /* Only used for bitfields; position of start of field. For
276 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
277 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
278 LONGEST bitpos = 0;
279
280 /* The number of references to this value. When a value is created,
281 the value chain holds a reference, so REFERENCE_COUNT is 1. If
282 release_value is called, this value is removed from the chain but
283 the caller of release_value now has a reference to this value.
284 The caller must arrange for a call to value_free later. */
285 int reference_count = 1;
286
287 /* Only used for bitfields; the containing value. This allows a
288 single read from the target when displaying multiple
289 bitfields. */
290 value_ref_ptr parent;
291
292 /* Type of the value. */
293 struct type *type;
294
295 /* If a value represents a C++ object, then the `type' field gives
296 the object's compile-time type. If the object actually belongs
297 to some class derived from `type', perhaps with other base
298 classes and additional members, then `type' is just a subobject
299 of the real thing, and the full object is probably larger than
300 `type' would suggest.
301
302 If `type' is a dynamic class (i.e. one with a vtable), then GDB
303 can actually determine the object's run-time type by looking at
304 the run-time type information in the vtable. When this
305 information is available, we may elect to read in the entire
306 object, for several reasons:
307
308 - When printing the value, the user would probably rather see the
309 full object, not just the limited portion apparent from the
310 compile-time type.
311
312 - If `type' has virtual base classes, then even printing `type'
313 alone may require reaching outside the `type' portion of the
314 object to wherever the virtual base class has been stored.
315
316 When we store the entire object, `enclosing_type' is the run-time
317 type -- the complete object -- and `embedded_offset' is the
318 offset of `type' within that larger type, in target addressable memory
319 units. The value_contents() macro takes `embedded_offset' into account,
320 so most GDB code continues to see the `type' portion of the value, just
321 as the inferior would.
322
323 If `type' is a pointer to an object, then `enclosing_type' is a
324 pointer to the object's run-time type, and `pointed_to_offset' is
325 the offset in target addressable memory units from the full object
326 to the pointed-to object -- that is, the value `embedded_offset' would
327 have if we followed the pointer and fetched the complete object.
328 (I don't really see the point. Why not just determine the
329 run-time type when you indirect, and avoid the special case? The
330 contents don't matter until you indirect anyway.)
331
332 If we're not doing anything fancy, `enclosing_type' is equal to
333 `type', and `embedded_offset' is zero, so everything works
334 normally. */
335 struct type *enclosing_type;
336 LONGEST embedded_offset = 0;
337 LONGEST pointed_to_offset = 0;
338
339 /* Actual contents of the value. Target byte-order. NULL or not
340 valid if lazy is nonzero. */
341 gdb_byte *contents = nullptr;
342
343 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
344 rather than available, since the common and default case is for a
345 value to be available. This is filled in at value read time.
346 The unavailable ranges are tracked in bits. Note that a contents
347 bit that has been optimized out doesn't really exist in the
348 program, so it can't be marked unavailable either. */
349 VEC(range_s) *unavailable = nullptr;
350
351 /* Likewise, but for optimized out contents (a chunk of the value of
352 a variable that does not actually exist in the program). If LVAL
353 is lval_register, this is a register ($pc, $sp, etc., never a
354 program variable) that has not been saved in the frame. Not
355 saved registers and optimized-out program variables values are
356 treated pretty much the same, except not-saved registers have a
357 different string representation and related error strings. */
358 VEC(range_s) *optimized_out = nullptr;
359 };
360
361 /* See value.h. */
362
363 struct gdbarch *
364 get_value_arch (const struct value *value)
365 {
366 return get_type_arch (value_type (value));
367 }
368
369 int
370 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
371 {
372 gdb_assert (!value->lazy);
373
374 return !ranges_contain (value->unavailable, offset, length);
375 }
376
377 int
378 value_bytes_available (const struct value *value,
379 LONGEST offset, LONGEST length)
380 {
381 return value_bits_available (value,
382 offset * TARGET_CHAR_BIT,
383 length * TARGET_CHAR_BIT);
384 }
385
386 int
387 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
388 {
389 gdb_assert (!value->lazy);
390
391 return ranges_contain (value->optimized_out, bit_offset, bit_length);
392 }
393
394 int
395 value_entirely_available (struct value *value)
396 {
397 /* We can only tell whether the whole value is available when we try
398 to read it. */
399 if (value->lazy)
400 value_fetch_lazy (value);
401
402 if (VEC_empty (range_s, value->unavailable))
403 return 1;
404 return 0;
405 }
406
407 /* Returns true if VALUE is entirely covered by RANGES. If the value
408 is lazy, it'll be read now. Note that RANGE is a pointer to
409 pointer because reading the value might change *RANGE. */
410
411 static int
412 value_entirely_covered_by_range_vector (struct value *value,
413 VEC(range_s) **ranges)
414 {
415 /* We can only tell whether the whole value is optimized out /
416 unavailable when we try to read it. */
417 if (value->lazy)
418 value_fetch_lazy (value);
419
420 if (VEC_length (range_s, *ranges) == 1)
421 {
422 struct range *t = VEC_index (range_s, *ranges, 0);
423
424 if (t->offset == 0
425 && t->length == (TARGET_CHAR_BIT
426 * TYPE_LENGTH (value_enclosing_type (value))))
427 return 1;
428 }
429
430 return 0;
431 }
432
433 int
434 value_entirely_unavailable (struct value *value)
435 {
436 return value_entirely_covered_by_range_vector (value, &value->unavailable);
437 }
438
439 int
440 value_entirely_optimized_out (struct value *value)
441 {
442 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
443 }
444
445 /* Insert into the vector pointed to by VECTORP the bit range starting of
446 OFFSET bits, and extending for the next LENGTH bits. */
447
448 static void
449 insert_into_bit_range_vector (VEC(range_s) **vectorp,
450 LONGEST offset, LONGEST length)
451 {
452 range_s newr;
453 int i;
454
455 /* Insert the range sorted. If there's overlap or the new range
456 would be contiguous with an existing range, merge. */
457
458 newr.offset = offset;
459 newr.length = length;
460
461 /* Do a binary search for the position the given range would be
462 inserted if we only considered the starting OFFSET of ranges.
463 Call that position I. Since we also have LENGTH to care for
464 (this is a range afterall), we need to check if the _previous_
465 range overlaps the I range. E.g., calling R the new range:
466
467 #1 - overlaps with previous
468
469 R
470 |-...-|
471 |---| |---| |------| ... |--|
472 0 1 2 N
473
474 I=1
475
476 In the case #1 above, the binary search would return `I=1',
477 meaning, this OFFSET should be inserted at position 1, and the
478 current position 1 should be pushed further (and become 2). But,
479 note that `0' overlaps with R, so we want to merge them.
480
481 A similar consideration needs to be taken if the new range would
482 be contiguous with the previous range:
483
484 #2 - contiguous with previous
485
486 R
487 |-...-|
488 |--| |---| |------| ... |--|
489 0 1 2 N
490
491 I=1
492
493 If there's no overlap with the previous range, as in:
494
495 #3 - not overlapping and not contiguous
496
497 R
498 |-...-|
499 |--| |---| |------| ... |--|
500 0 1 2 N
501
502 I=1
503
504 or if I is 0:
505
506 #4 - R is the range with lowest offset
507
508 R
509 |-...-|
510 |--| |---| |------| ... |--|
511 0 1 2 N
512
513 I=0
514
515 ... we just push the new range to I.
516
517 All the 4 cases above need to consider that the new range may
518 also overlap several of the ranges that follow, or that R may be
519 contiguous with the following range, and merge. E.g.,
520
521 #5 - overlapping following ranges
522
523 R
524 |------------------------|
525 |--| |---| |------| ... |--|
526 0 1 2 N
527
528 I=0
529
530 or:
531
532 R
533 |-------|
534 |--| |---| |------| ... |--|
535 0 1 2 N
536
537 I=1
538
539 */
540
541 i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
542 if (i > 0)
543 {
544 struct range *bef = VEC_index (range_s, *vectorp, i - 1);
545
546 if (ranges_overlap (bef->offset, bef->length, offset, length))
547 {
548 /* #1 */
549 ULONGEST l = std::min (bef->offset, offset);
550 ULONGEST h = std::max (bef->offset + bef->length, offset + length);
551
552 bef->offset = l;
553 bef->length = h - l;
554 i--;
555 }
556 else if (offset == bef->offset + bef->length)
557 {
558 /* #2 */
559 bef->length += length;
560 i--;
561 }
562 else
563 {
564 /* #3 */
565 VEC_safe_insert (range_s, *vectorp, i, &newr);
566 }
567 }
568 else
569 {
570 /* #4 */
571 VEC_safe_insert (range_s, *vectorp, i, &newr);
572 }
573
574 /* Check whether the ranges following the one we've just added or
575 touched can be folded in (#5 above). */
576 if (i + 1 < VEC_length (range_s, *vectorp))
577 {
578 struct range *t;
579 struct range *r;
580 int removed = 0;
581 int next = i + 1;
582
583 /* Get the range we just touched. */
584 t = VEC_index (range_s, *vectorp, i);
585 removed = 0;
586
587 i = next;
588 for (; VEC_iterate (range_s, *vectorp, i, r); i++)
589 if (r->offset <= t->offset + t->length)
590 {
591 ULONGEST l, h;
592
593 l = std::min (t->offset, r->offset);
594 h = std::max (t->offset + t->length, r->offset + r->length);
595
596 t->offset = l;
597 t->length = h - l;
598
599 removed++;
600 }
601 else
602 {
603 /* If we couldn't merge this one, we won't be able to
604 merge following ones either, since the ranges are
605 always sorted by OFFSET. */
606 break;
607 }
608
609 if (removed != 0)
610 VEC_block_remove (range_s, *vectorp, next, removed);
611 }
612 }
613
614 void
615 mark_value_bits_unavailable (struct value *value,
616 LONGEST offset, LONGEST length)
617 {
618 insert_into_bit_range_vector (&value->unavailable, offset, length);
619 }
620
621 void
622 mark_value_bytes_unavailable (struct value *value,
623 LONGEST offset, LONGEST length)
624 {
625 mark_value_bits_unavailable (value,
626 offset * TARGET_CHAR_BIT,
627 length * TARGET_CHAR_BIT);
628 }
629
630 /* Find the first range in RANGES that overlaps the range defined by
631 OFFSET and LENGTH, starting at element POS in the RANGES vector,
632 Returns the index into RANGES where such overlapping range was
633 found, or -1 if none was found. */
634
635 static int
636 find_first_range_overlap (VEC(range_s) *ranges, int pos,
637 LONGEST offset, LONGEST length)
638 {
639 range_s *r;
640 int i;
641
642 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
643 if (ranges_overlap (r->offset, r->length, offset, length))
644 return i;
645
646 return -1;
647 }
648
649 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
650 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
651 return non-zero.
652
653 It must always be the case that:
654 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
655
656 It is assumed that memory can be accessed from:
657 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
658 to:
659 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
660 / TARGET_CHAR_BIT) */
661 static int
662 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
663 const gdb_byte *ptr2, size_t offset2_bits,
664 size_t length_bits)
665 {
666 gdb_assert (offset1_bits % TARGET_CHAR_BIT
667 == offset2_bits % TARGET_CHAR_BIT);
668
669 if (offset1_bits % TARGET_CHAR_BIT != 0)
670 {
671 size_t bits;
672 gdb_byte mask, b1, b2;
673
674 /* The offset from the base pointers PTR1 and PTR2 is not a complete
675 number of bytes. A number of bits up to either the next exact
676 byte boundary, or LENGTH_BITS (which ever is sooner) will be
677 compared. */
678 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
679 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
680 mask = (1 << bits) - 1;
681
682 if (length_bits < bits)
683 {
684 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
685 bits = length_bits;
686 }
687
688 /* Now load the two bytes and mask off the bits we care about. */
689 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
690 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
691
692 if (b1 != b2)
693 return 1;
694
695 /* Now update the length and offsets to take account of the bits
696 we've just compared. */
697 length_bits -= bits;
698 offset1_bits += bits;
699 offset2_bits += bits;
700 }
701
702 if (length_bits % TARGET_CHAR_BIT != 0)
703 {
704 size_t bits;
705 size_t o1, o2;
706 gdb_byte mask, b1, b2;
707
708 /* The length is not an exact number of bytes. After the previous
709 IF.. block then the offsets are byte aligned, or the
710 length is zero (in which case this code is not reached). Compare
711 a number of bits at the end of the region, starting from an exact
712 byte boundary. */
713 bits = length_bits % TARGET_CHAR_BIT;
714 o1 = offset1_bits + length_bits - bits;
715 o2 = offset2_bits + length_bits - bits;
716
717 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
718 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
719
720 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
721 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
722
723 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
724 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
725
726 if (b1 != b2)
727 return 1;
728
729 length_bits -= bits;
730 }
731
732 if (length_bits > 0)
733 {
734 /* We've now taken care of any stray "bits" at the start, or end of
735 the region to compare, the remainder can be covered with a simple
736 memcmp. */
737 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
738 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
739 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
740
741 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
742 ptr2 + offset2_bits / TARGET_CHAR_BIT,
743 length_bits / TARGET_CHAR_BIT);
744 }
745
746 /* Length is zero, regions match. */
747 return 0;
748 }
749
750 /* Helper struct for find_first_range_overlap_and_match and
751 value_contents_bits_eq. Keep track of which slot of a given ranges
752 vector have we last looked at. */
753
754 struct ranges_and_idx
755 {
756 /* The ranges. */
757 VEC(range_s) *ranges;
758
759 /* The range we've last found in RANGES. Given ranges are sorted,
760 we can start the next lookup here. */
761 int idx;
762 };
763
764 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
765 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
766 ranges starting at OFFSET2 bits. Return true if the ranges match
767 and fill in *L and *H with the overlapping window relative to
768 (both) OFFSET1 or OFFSET2. */
769
770 static int
771 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
772 struct ranges_and_idx *rp2,
773 LONGEST offset1, LONGEST offset2,
774 LONGEST length, ULONGEST *l, ULONGEST *h)
775 {
776 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
777 offset1, length);
778 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
779 offset2, length);
780
781 if (rp1->idx == -1 && rp2->idx == -1)
782 {
783 *l = length;
784 *h = length;
785 return 1;
786 }
787 else if (rp1->idx == -1 || rp2->idx == -1)
788 return 0;
789 else
790 {
791 range_s *r1, *r2;
792 ULONGEST l1, h1;
793 ULONGEST l2, h2;
794
795 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
796 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
797
798 /* Get the unavailable windows intersected by the incoming
799 ranges. The first and last ranges that overlap the argument
800 range may be wider than said incoming arguments ranges. */
801 l1 = std::max (offset1, r1->offset);
802 h1 = std::min (offset1 + length, r1->offset + r1->length);
803
804 l2 = std::max (offset2, r2->offset);
805 h2 = std::min (offset2 + length, offset2 + r2->length);
806
807 /* Make them relative to the respective start offsets, so we can
808 compare them for equality. */
809 l1 -= offset1;
810 h1 -= offset1;
811
812 l2 -= offset2;
813 h2 -= offset2;
814
815 /* Different ranges, no match. */
816 if (l1 != l2 || h1 != h2)
817 return 0;
818
819 *h = h1;
820 *l = l1;
821 return 1;
822 }
823 }
824
825 /* Helper function for value_contents_eq. The only difference is that
826 this function is bit rather than byte based.
827
828 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
829 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
830 Return true if the available bits match. */
831
832 static bool
833 value_contents_bits_eq (const struct value *val1, int offset1,
834 const struct value *val2, int offset2,
835 int length)
836 {
837 /* Each array element corresponds to a ranges source (unavailable,
838 optimized out). '1' is for VAL1, '2' for VAL2. */
839 struct ranges_and_idx rp1[2], rp2[2];
840
841 /* See function description in value.h. */
842 gdb_assert (!val1->lazy && !val2->lazy);
843
844 /* We shouldn't be trying to compare past the end of the values. */
845 gdb_assert (offset1 + length
846 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
847 gdb_assert (offset2 + length
848 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
849
850 memset (&rp1, 0, sizeof (rp1));
851 memset (&rp2, 0, sizeof (rp2));
852 rp1[0].ranges = val1->unavailable;
853 rp2[0].ranges = val2->unavailable;
854 rp1[1].ranges = val1->optimized_out;
855 rp2[1].ranges = val2->optimized_out;
856
857 while (length > 0)
858 {
859 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
860 int i;
861
862 for (i = 0; i < 2; i++)
863 {
864 ULONGEST l_tmp, h_tmp;
865
866 /* The contents only match equal if the invalid/unavailable
867 contents ranges match as well. */
868 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
869 offset1, offset2, length,
870 &l_tmp, &h_tmp))
871 return false;
872
873 /* We're interested in the lowest/first range found. */
874 if (i == 0 || l_tmp < l)
875 {
876 l = l_tmp;
877 h = h_tmp;
878 }
879 }
880
881 /* Compare the available/valid contents. */
882 if (memcmp_with_bit_offsets (val1->contents, offset1,
883 val2->contents, offset2, l) != 0)
884 return false;
885
886 length -= h;
887 offset1 += h;
888 offset2 += h;
889 }
890
891 return true;
892 }
893
894 bool
895 value_contents_eq (const struct value *val1, LONGEST offset1,
896 const struct value *val2, LONGEST offset2,
897 LONGEST length)
898 {
899 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
900 val2, offset2 * TARGET_CHAR_BIT,
901 length * TARGET_CHAR_BIT);
902 }
903
904
905 /* The value-history records all the values printed by print commands
906 during this session. */
907
908 static std::vector<value_ref_ptr> value_history;
909
910 \f
911 /* List of all value objects currently allocated
912 (except for those released by calls to release_value)
913 This is so they can be freed after each command. */
914
915 static std::vector<value_ref_ptr> all_values;
916
917 /* Allocate a lazy value for type TYPE. Its actual content is
918 "lazily" allocated too: the content field of the return value is
919 NULL; it will be allocated when it is fetched from the target. */
920
921 struct value *
922 allocate_value_lazy (struct type *type)
923 {
924 struct value *val;
925
926 /* Call check_typedef on our type to make sure that, if TYPE
927 is a TYPE_CODE_TYPEDEF, its length is set to the length
928 of the target type instead of zero. However, we do not
929 replace the typedef type by the target type, because we want
930 to keep the typedef in order to be able to set the VAL's type
931 description correctly. */
932 check_typedef (type);
933
934 val = new struct value (type);
935
936 /* Values start out on the all_values chain. */
937 all_values.emplace_back (val);
938
939 return val;
940 }
941
942 /* The maximum size, in bytes, that GDB will try to allocate for a value.
943 The initial value of 64k was not selected for any specific reason, it is
944 just a reasonable starting point. */
945
946 static int max_value_size = 65536; /* 64k bytes */
947
948 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
949 LONGEST, otherwise GDB will not be able to parse integer values from the
950 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
951 be unable to parse "set max-value-size 2".
952
953 As we want a consistent GDB experience across hosts with different sizes
954 of LONGEST, this arbitrary minimum value was selected, so long as this
955 is bigger than LONGEST on all GDB supported hosts we're fine. */
956
957 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
958 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
959
960 /* Implement the "set max-value-size" command. */
961
962 static void
963 set_max_value_size (const char *args, int from_tty,
964 struct cmd_list_element *c)
965 {
966 gdb_assert (max_value_size == -1 || max_value_size >= 0);
967
968 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
969 {
970 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
971 error (_("max-value-size set too low, increasing to %d bytes"),
972 max_value_size);
973 }
974 }
975
976 /* Implement the "show max-value-size" command. */
977
978 static void
979 show_max_value_size (struct ui_file *file, int from_tty,
980 struct cmd_list_element *c, const char *value)
981 {
982 if (max_value_size == -1)
983 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
984 else
985 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
986 max_value_size);
987 }
988
989 /* Called before we attempt to allocate or reallocate a buffer for the
990 contents of a value. TYPE is the type of the value for which we are
991 allocating the buffer. If the buffer is too large (based on the user
992 controllable setting) then throw an error. If this function returns
993 then we should attempt to allocate the buffer. */
994
995 static void
996 check_type_length_before_alloc (const struct type *type)
997 {
998 unsigned int length = TYPE_LENGTH (type);
999
1000 if (max_value_size > -1 && length > max_value_size)
1001 {
1002 if (TYPE_NAME (type) != NULL)
1003 error (_("value of type `%s' requires %u bytes, which is more "
1004 "than max-value-size"), TYPE_NAME (type), length);
1005 else
1006 error (_("value requires %u bytes, which is more than "
1007 "max-value-size"), length);
1008 }
1009 }
1010
1011 /* Allocate the contents of VAL if it has not been allocated yet. */
1012
1013 static void
1014 allocate_value_contents (struct value *val)
1015 {
1016 if (!val->contents)
1017 {
1018 check_type_length_before_alloc (val->enclosing_type);
1019 val->contents
1020 = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
1021 }
1022 }
1023
1024 /* Allocate a value and its contents for type TYPE. */
1025
1026 struct value *
1027 allocate_value (struct type *type)
1028 {
1029 struct value *val = allocate_value_lazy (type);
1030
1031 allocate_value_contents (val);
1032 val->lazy = 0;
1033 return val;
1034 }
1035
1036 /* Allocate a value that has the correct length
1037 for COUNT repetitions of type TYPE. */
1038
1039 struct value *
1040 allocate_repeat_value (struct type *type, int count)
1041 {
1042 int low_bound = current_language->string_lower_bound; /* ??? */
1043 /* FIXME-type-allocation: need a way to free this type when we are
1044 done with it. */
1045 struct type *array_type
1046 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1047
1048 return allocate_value (array_type);
1049 }
1050
1051 struct value *
1052 allocate_computed_value (struct type *type,
1053 const struct lval_funcs *funcs,
1054 void *closure)
1055 {
1056 struct value *v = allocate_value_lazy (type);
1057
1058 VALUE_LVAL (v) = lval_computed;
1059 v->location.computed.funcs = funcs;
1060 v->location.computed.closure = closure;
1061
1062 return v;
1063 }
1064
1065 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1066
1067 struct value *
1068 allocate_optimized_out_value (struct type *type)
1069 {
1070 struct value *retval = allocate_value_lazy (type);
1071
1072 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1073 set_value_lazy (retval, 0);
1074 return retval;
1075 }
1076
1077 /* Accessor methods. */
1078
1079 struct type *
1080 value_type (const struct value *value)
1081 {
1082 return value->type;
1083 }
1084 void
1085 deprecated_set_value_type (struct value *value, struct type *type)
1086 {
1087 value->type = type;
1088 }
1089
1090 LONGEST
1091 value_offset (const struct value *value)
1092 {
1093 return value->offset;
1094 }
1095 void
1096 set_value_offset (struct value *value, LONGEST offset)
1097 {
1098 value->offset = offset;
1099 }
1100
1101 LONGEST
1102 value_bitpos (const struct value *value)
1103 {
1104 return value->bitpos;
1105 }
1106 void
1107 set_value_bitpos (struct value *value, LONGEST bit)
1108 {
1109 value->bitpos = bit;
1110 }
1111
1112 LONGEST
1113 value_bitsize (const struct value *value)
1114 {
1115 return value->bitsize;
1116 }
1117 void
1118 set_value_bitsize (struct value *value, LONGEST bit)
1119 {
1120 value->bitsize = bit;
1121 }
1122
1123 struct value *
1124 value_parent (const struct value *value)
1125 {
1126 return value->parent.get ();
1127 }
1128
1129 /* See value.h. */
1130
1131 void
1132 set_value_parent (struct value *value, struct value *parent)
1133 {
1134 value->parent = value_ref_ptr (value_incref (parent));
1135 }
1136
1137 gdb_byte *
1138 value_contents_raw (struct value *value)
1139 {
1140 struct gdbarch *arch = get_value_arch (value);
1141 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1142
1143 allocate_value_contents (value);
1144 return value->contents + value->embedded_offset * unit_size;
1145 }
1146
1147 gdb_byte *
1148 value_contents_all_raw (struct value *value)
1149 {
1150 allocate_value_contents (value);
1151 return value->contents;
1152 }
1153
1154 struct type *
1155 value_enclosing_type (const struct value *value)
1156 {
1157 return value->enclosing_type;
1158 }
1159
1160 /* Look at value.h for description. */
1161
1162 struct type *
1163 value_actual_type (struct value *value, int resolve_simple_types,
1164 int *real_type_found)
1165 {
1166 struct value_print_options opts;
1167 struct type *result;
1168
1169 get_user_print_options (&opts);
1170
1171 if (real_type_found)
1172 *real_type_found = 0;
1173 result = value_type (value);
1174 if (opts.objectprint)
1175 {
1176 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1177 fetch its rtti type. */
1178 if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1179 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1180 == TYPE_CODE_STRUCT
1181 && !value_optimized_out (value))
1182 {
1183 struct type *real_type;
1184
1185 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1186 if (real_type)
1187 {
1188 if (real_type_found)
1189 *real_type_found = 1;
1190 result = real_type;
1191 }
1192 }
1193 else if (resolve_simple_types)
1194 {
1195 if (real_type_found)
1196 *real_type_found = 1;
1197 result = value_enclosing_type (value);
1198 }
1199 }
1200
1201 return result;
1202 }
1203
1204 void
1205 error_value_optimized_out (void)
1206 {
1207 error (_("value has been optimized out"));
1208 }
1209
1210 static void
1211 require_not_optimized_out (const struct value *value)
1212 {
1213 if (!VEC_empty (range_s, value->optimized_out))
1214 {
1215 if (value->lval == lval_register)
1216 error (_("register has not been saved in frame"));
1217 else
1218 error_value_optimized_out ();
1219 }
1220 }
1221
1222 static void
1223 require_available (const struct value *value)
1224 {
1225 if (!VEC_empty (range_s, value->unavailable))
1226 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1227 }
1228
1229 const gdb_byte *
1230 value_contents_for_printing (struct value *value)
1231 {
1232 if (value->lazy)
1233 value_fetch_lazy (value);
1234 return value->contents;
1235 }
1236
1237 const gdb_byte *
1238 value_contents_for_printing_const (const struct value *value)
1239 {
1240 gdb_assert (!value->lazy);
1241 return value->contents;
1242 }
1243
1244 const gdb_byte *
1245 value_contents_all (struct value *value)
1246 {
1247 const gdb_byte *result = value_contents_for_printing (value);
1248 require_not_optimized_out (value);
1249 require_available (value);
1250 return result;
1251 }
1252
1253 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1254 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1255
1256 static void
1257 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1258 VEC (range_s) *src_range, int src_bit_offset,
1259 int bit_length)
1260 {
1261 range_s *r;
1262 int i;
1263
1264 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1265 {
1266 ULONGEST h, l;
1267
1268 l = std::max (r->offset, (LONGEST) src_bit_offset);
1269 h = std::min (r->offset + r->length,
1270 (LONGEST) src_bit_offset + bit_length);
1271
1272 if (l < h)
1273 insert_into_bit_range_vector (dst_range,
1274 dst_bit_offset + (l - src_bit_offset),
1275 h - l);
1276 }
1277 }
1278
1279 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1280 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1281
1282 static void
1283 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1284 const struct value *src, int src_bit_offset,
1285 int bit_length)
1286 {
1287 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1288 src->unavailable, src_bit_offset,
1289 bit_length);
1290 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1291 src->optimized_out, src_bit_offset,
1292 bit_length);
1293 }
1294
1295 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1296 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1297 contents, starting at DST_OFFSET. If unavailable contents are
1298 being copied from SRC, the corresponding DST contents are marked
1299 unavailable accordingly. Neither DST nor SRC may be lazy
1300 values.
1301
1302 It is assumed the contents of DST in the [DST_OFFSET,
1303 DST_OFFSET+LENGTH) range are wholly available. */
1304
1305 void
1306 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1307 struct value *src, LONGEST src_offset, LONGEST length)
1308 {
1309 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1310 struct gdbarch *arch = get_value_arch (src);
1311 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1312
1313 /* A lazy DST would make that this copy operation useless, since as
1314 soon as DST's contents were un-lazied (by a later value_contents
1315 call, say), the contents would be overwritten. A lazy SRC would
1316 mean we'd be copying garbage. */
1317 gdb_assert (!dst->lazy && !src->lazy);
1318
1319 /* The overwritten DST range gets unavailability ORed in, not
1320 replaced. Make sure to remember to implement replacing if it
1321 turns out actually necessary. */
1322 gdb_assert (value_bytes_available (dst, dst_offset, length));
1323 gdb_assert (!value_bits_any_optimized_out (dst,
1324 TARGET_CHAR_BIT * dst_offset,
1325 TARGET_CHAR_BIT * length));
1326
1327 /* Copy the data. */
1328 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1329 value_contents_all_raw (src) + src_offset * unit_size,
1330 length * unit_size);
1331
1332 /* Copy the meta-data, adjusted. */
1333 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1334 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1335 bit_length = length * unit_size * HOST_CHAR_BIT;
1336
1337 value_ranges_copy_adjusted (dst, dst_bit_offset,
1338 src, src_bit_offset,
1339 bit_length);
1340 }
1341
1342 /* Copy LENGTH bytes of SRC value's (all) contents
1343 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1344 (all) contents, starting at DST_OFFSET. If unavailable contents
1345 are being copied from SRC, the corresponding DST contents are
1346 marked unavailable accordingly. DST must not be lazy. If SRC is
1347 lazy, it will be fetched now.
1348
1349 It is assumed the contents of DST in the [DST_OFFSET,
1350 DST_OFFSET+LENGTH) range are wholly available. */
1351
1352 void
1353 value_contents_copy (struct value *dst, LONGEST dst_offset,
1354 struct value *src, LONGEST src_offset, LONGEST length)
1355 {
1356 if (src->lazy)
1357 value_fetch_lazy (src);
1358
1359 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1360 }
1361
1362 int
1363 value_lazy (const struct value *value)
1364 {
1365 return value->lazy;
1366 }
1367
1368 void
1369 set_value_lazy (struct value *value, int val)
1370 {
1371 value->lazy = val;
1372 }
1373
1374 int
1375 value_stack (const struct value *value)
1376 {
1377 return value->stack;
1378 }
1379
1380 void
1381 set_value_stack (struct value *value, int val)
1382 {
1383 value->stack = val;
1384 }
1385
1386 const gdb_byte *
1387 value_contents (struct value *value)
1388 {
1389 const gdb_byte *result = value_contents_writeable (value);
1390 require_not_optimized_out (value);
1391 require_available (value);
1392 return result;
1393 }
1394
1395 gdb_byte *
1396 value_contents_writeable (struct value *value)
1397 {
1398 if (value->lazy)
1399 value_fetch_lazy (value);
1400 return value_contents_raw (value);
1401 }
1402
1403 int
1404 value_optimized_out (struct value *value)
1405 {
1406 /* We can only know if a value is optimized out once we have tried to
1407 fetch it. */
1408 if (VEC_empty (range_s, value->optimized_out) && value->lazy)
1409 {
1410 TRY
1411 {
1412 value_fetch_lazy (value);
1413 }
1414 CATCH (ex, RETURN_MASK_ERROR)
1415 {
1416 /* Fall back to checking value->optimized_out. */
1417 }
1418 END_CATCH
1419 }
1420
1421 return !VEC_empty (range_s, value->optimized_out);
1422 }
1423
1424 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1425 the following LENGTH bytes. */
1426
1427 void
1428 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1429 {
1430 mark_value_bits_optimized_out (value,
1431 offset * TARGET_CHAR_BIT,
1432 length * TARGET_CHAR_BIT);
1433 }
1434
1435 /* See value.h. */
1436
1437 void
1438 mark_value_bits_optimized_out (struct value *value,
1439 LONGEST offset, LONGEST length)
1440 {
1441 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1442 }
1443
1444 int
1445 value_bits_synthetic_pointer (const struct value *value,
1446 LONGEST offset, LONGEST length)
1447 {
1448 if (value->lval != lval_computed
1449 || !value->location.computed.funcs->check_synthetic_pointer)
1450 return 0;
1451 return value->location.computed.funcs->check_synthetic_pointer (value,
1452 offset,
1453 length);
1454 }
1455
1456 LONGEST
1457 value_embedded_offset (const struct value *value)
1458 {
1459 return value->embedded_offset;
1460 }
1461
1462 void
1463 set_value_embedded_offset (struct value *value, LONGEST val)
1464 {
1465 value->embedded_offset = val;
1466 }
1467
1468 LONGEST
1469 value_pointed_to_offset (const struct value *value)
1470 {
1471 return value->pointed_to_offset;
1472 }
1473
1474 void
1475 set_value_pointed_to_offset (struct value *value, LONGEST val)
1476 {
1477 value->pointed_to_offset = val;
1478 }
1479
1480 const struct lval_funcs *
1481 value_computed_funcs (const struct value *v)
1482 {
1483 gdb_assert (value_lval_const (v) == lval_computed);
1484
1485 return v->location.computed.funcs;
1486 }
1487
1488 void *
1489 value_computed_closure (const struct value *v)
1490 {
1491 gdb_assert (v->lval == lval_computed);
1492
1493 return v->location.computed.closure;
1494 }
1495
1496 enum lval_type *
1497 deprecated_value_lval_hack (struct value *value)
1498 {
1499 return &value->lval;
1500 }
1501
1502 enum lval_type
1503 value_lval_const (const struct value *value)
1504 {
1505 return value->lval;
1506 }
1507
1508 CORE_ADDR
1509 value_address (const struct value *value)
1510 {
1511 if (value->lval != lval_memory)
1512 return 0;
1513 if (value->parent != NULL)
1514 return value_address (value->parent.get ()) + value->offset;
1515 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1516 {
1517 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1518 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1519 }
1520
1521 return value->location.address + value->offset;
1522 }
1523
1524 CORE_ADDR
1525 value_raw_address (const struct value *value)
1526 {
1527 if (value->lval != lval_memory)
1528 return 0;
1529 return value->location.address;
1530 }
1531
1532 void
1533 set_value_address (struct value *value, CORE_ADDR addr)
1534 {
1535 gdb_assert (value->lval == lval_memory);
1536 value->location.address = addr;
1537 }
1538
1539 struct internalvar **
1540 deprecated_value_internalvar_hack (struct value *value)
1541 {
1542 return &value->location.internalvar;
1543 }
1544
1545 struct frame_id *
1546 deprecated_value_next_frame_id_hack (struct value *value)
1547 {
1548 gdb_assert (value->lval == lval_register);
1549 return &value->location.reg.next_frame_id;
1550 }
1551
1552 int *
1553 deprecated_value_regnum_hack (struct value *value)
1554 {
1555 gdb_assert (value->lval == lval_register);
1556 return &value->location.reg.regnum;
1557 }
1558
1559 int
1560 deprecated_value_modifiable (const struct value *value)
1561 {
1562 return value->modifiable;
1563 }
1564 \f
1565 /* Return a mark in the value chain. All values allocated after the
1566 mark is obtained (except for those released) are subject to being freed
1567 if a subsequent value_free_to_mark is passed the mark. */
1568 struct value *
1569 value_mark (void)
1570 {
1571 if (all_values.empty ())
1572 return nullptr;
1573 return all_values.back ().get ();
1574 }
1575
1576 /* Take a reference to VAL. VAL will not be deallocated until all
1577 references are released. */
1578
1579 struct value *
1580 value_incref (struct value *val)
1581 {
1582 val->reference_count++;
1583 return val;
1584 }
1585
1586 /* Release a reference to VAL, which was acquired with value_incref.
1587 This function is also called to deallocate values from the value
1588 chain. */
1589
1590 void
1591 value_decref (struct value *val)
1592 {
1593 if (val != nullptr)
1594 {
1595 gdb_assert (val->reference_count > 0);
1596 val->reference_count--;
1597 if (val->reference_count == 0)
1598 delete val;
1599 }
1600 }
1601
1602 /* Free all values allocated since MARK was obtained by value_mark
1603 (except for those released). */
1604 void
1605 value_free_to_mark (const struct value *mark)
1606 {
1607 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1608 if (iter == all_values.end ())
1609 all_values.clear ();
1610 else
1611 all_values.erase (iter + 1, all_values.end ());
1612 }
1613
1614 /* Remove VAL from the chain all_values
1615 so it will not be freed automatically. */
1616
1617 value_ref_ptr
1618 release_value (struct value *val)
1619 {
1620 struct value *v;
1621
1622 if (val == nullptr)
1623 return value_ref_ptr ();
1624
1625 std::vector<value_ref_ptr>::reverse_iterator iter;
1626 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1627 {
1628 if (*iter == val)
1629 {
1630 value_ref_ptr result = *iter;
1631 all_values.erase (iter.base () - 1);
1632 return result;
1633 }
1634 }
1635
1636 /* We must always return an owned reference. Normally this happens
1637 because we transfer the reference from the value chain, but in
1638 this case the value was not on the chain. */
1639 return value_ref_ptr (value_incref (val));
1640 }
1641
1642 /* See value.h. */
1643
1644 std::vector<value_ref_ptr>
1645 value_release_to_mark (const struct value *mark)
1646 {
1647 std::vector<value_ref_ptr> result;
1648
1649 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1650 if (iter == all_values.end ())
1651 std::swap (result, all_values);
1652 else
1653 {
1654 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1655 all_values.erase (iter + 1, all_values.end ());
1656 }
1657 std::reverse (result.begin (), result.end ());
1658 return result;
1659 }
1660
1661 /* Return a copy of the value ARG.
1662 It contains the same contents, for same memory address,
1663 but it's a different block of storage. */
1664
1665 struct value *
1666 value_copy (struct value *arg)
1667 {
1668 struct type *encl_type = value_enclosing_type (arg);
1669 struct value *val;
1670
1671 if (value_lazy (arg))
1672 val = allocate_value_lazy (encl_type);
1673 else
1674 val = allocate_value (encl_type);
1675 val->type = arg->type;
1676 VALUE_LVAL (val) = VALUE_LVAL (arg);
1677 val->location = arg->location;
1678 val->offset = arg->offset;
1679 val->bitpos = arg->bitpos;
1680 val->bitsize = arg->bitsize;
1681 val->lazy = arg->lazy;
1682 val->embedded_offset = value_embedded_offset (arg);
1683 val->pointed_to_offset = arg->pointed_to_offset;
1684 val->modifiable = arg->modifiable;
1685 if (!value_lazy (val))
1686 {
1687 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1688 TYPE_LENGTH (value_enclosing_type (arg)));
1689
1690 }
1691 val->unavailable = VEC_copy (range_s, arg->unavailable);
1692 val->optimized_out = VEC_copy (range_s, arg->optimized_out);
1693 val->parent = arg->parent;
1694 if (VALUE_LVAL (val) == lval_computed)
1695 {
1696 const struct lval_funcs *funcs = val->location.computed.funcs;
1697
1698 if (funcs->copy_closure)
1699 val->location.computed.closure = funcs->copy_closure (val);
1700 }
1701 return val;
1702 }
1703
1704 /* Return a "const" and/or "volatile" qualified version of the value V.
1705 If CNST is true, then the returned value will be qualified with
1706 "const".
1707 if VOLTL is true, then the returned value will be qualified with
1708 "volatile". */
1709
1710 struct value *
1711 make_cv_value (int cnst, int voltl, struct value *v)
1712 {
1713 struct type *val_type = value_type (v);
1714 struct type *enclosing_type = value_enclosing_type (v);
1715 struct value *cv_val = value_copy (v);
1716
1717 deprecated_set_value_type (cv_val,
1718 make_cv_type (cnst, voltl, val_type, NULL));
1719 set_value_enclosing_type (cv_val,
1720 make_cv_type (cnst, voltl, enclosing_type, NULL));
1721
1722 return cv_val;
1723 }
1724
1725 /* Return a version of ARG that is non-lvalue. */
1726
1727 struct value *
1728 value_non_lval (struct value *arg)
1729 {
1730 if (VALUE_LVAL (arg) != not_lval)
1731 {
1732 struct type *enc_type = value_enclosing_type (arg);
1733 struct value *val = allocate_value (enc_type);
1734
1735 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1736 TYPE_LENGTH (enc_type));
1737 val->type = arg->type;
1738 set_value_embedded_offset (val, value_embedded_offset (arg));
1739 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1740 return val;
1741 }
1742 return arg;
1743 }
1744
1745 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1746
1747 void
1748 value_force_lval (struct value *v, CORE_ADDR addr)
1749 {
1750 gdb_assert (VALUE_LVAL (v) == not_lval);
1751
1752 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1753 v->lval = lval_memory;
1754 v->location.address = addr;
1755 }
1756
1757 void
1758 set_value_component_location (struct value *component,
1759 const struct value *whole)
1760 {
1761 struct type *type;
1762
1763 gdb_assert (whole->lval != lval_xcallable);
1764
1765 if (whole->lval == lval_internalvar)
1766 VALUE_LVAL (component) = lval_internalvar_component;
1767 else
1768 VALUE_LVAL (component) = whole->lval;
1769
1770 component->location = whole->location;
1771 if (whole->lval == lval_computed)
1772 {
1773 const struct lval_funcs *funcs = whole->location.computed.funcs;
1774
1775 if (funcs->copy_closure)
1776 component->location.computed.closure = funcs->copy_closure (whole);
1777 }
1778
1779 /* If type has a dynamic resolved location property
1780 update it's value address. */
1781 type = value_type (whole);
1782 if (NULL != TYPE_DATA_LOCATION (type)
1783 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1784 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1785 }
1786
1787 /* Access to the value history. */
1788
1789 /* Record a new value in the value history.
1790 Returns the absolute history index of the entry. */
1791
1792 int
1793 record_latest_value (struct value *val)
1794 {
1795 int i;
1796
1797 /* We don't want this value to have anything to do with the inferior anymore.
1798 In particular, "set $1 = 50" should not affect the variable from which
1799 the value was taken, and fast watchpoints should be able to assume that
1800 a value on the value history never changes. */
1801 if (value_lazy (val))
1802 value_fetch_lazy (val);
1803 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1804 from. This is a bit dubious, because then *&$1 does not just return $1
1805 but the current contents of that location. c'est la vie... */
1806 val->modifiable = 0;
1807
1808 value_history.push_back (release_value (val));
1809
1810 return value_history.size ();
1811 }
1812
1813 /* Return a copy of the value in the history with sequence number NUM. */
1814
1815 struct value *
1816 access_value_history (int num)
1817 {
1818 int i;
1819 int absnum = num;
1820
1821 if (absnum <= 0)
1822 absnum += value_history.size ();
1823
1824 if (absnum <= 0)
1825 {
1826 if (num == 0)
1827 error (_("The history is empty."));
1828 else if (num == 1)
1829 error (_("There is only one value in the history."));
1830 else
1831 error (_("History does not go back to $$%d."), -num);
1832 }
1833 if (absnum > value_history.size ())
1834 error (_("History has not yet reached $%d."), absnum);
1835
1836 absnum--;
1837
1838 return value_copy (value_history[absnum].get ());
1839 }
1840
1841 static void
1842 show_values (const char *num_exp, int from_tty)
1843 {
1844 int i;
1845 struct value *val;
1846 static int num = 1;
1847
1848 if (num_exp)
1849 {
1850 /* "show values +" should print from the stored position.
1851 "show values <exp>" should print around value number <exp>. */
1852 if (num_exp[0] != '+' || num_exp[1] != '\0')
1853 num = parse_and_eval_long (num_exp) - 5;
1854 }
1855 else
1856 {
1857 /* "show values" means print the last 10 values. */
1858 num = value_history.size () - 9;
1859 }
1860
1861 if (num <= 0)
1862 num = 1;
1863
1864 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1865 {
1866 struct value_print_options opts;
1867
1868 val = access_value_history (i);
1869 printf_filtered (("$%d = "), i);
1870 get_user_print_options (&opts);
1871 value_print (val, gdb_stdout, &opts);
1872 printf_filtered (("\n"));
1873 }
1874
1875 /* The next "show values +" should start after what we just printed. */
1876 num += 10;
1877
1878 /* Hitting just return after this command should do the same thing as
1879 "show values +". If num_exp is null, this is unnecessary, since
1880 "show values +" is not useful after "show values". */
1881 if (from_tty && num_exp)
1882 set_repeat_arguments ("+");
1883 }
1884 \f
1885 enum internalvar_kind
1886 {
1887 /* The internal variable is empty. */
1888 INTERNALVAR_VOID,
1889
1890 /* The value of the internal variable is provided directly as
1891 a GDB value object. */
1892 INTERNALVAR_VALUE,
1893
1894 /* A fresh value is computed via a call-back routine on every
1895 access to the internal variable. */
1896 INTERNALVAR_MAKE_VALUE,
1897
1898 /* The internal variable holds a GDB internal convenience function. */
1899 INTERNALVAR_FUNCTION,
1900
1901 /* The variable holds an integer value. */
1902 INTERNALVAR_INTEGER,
1903
1904 /* The variable holds a GDB-provided string. */
1905 INTERNALVAR_STRING,
1906 };
1907
1908 union internalvar_data
1909 {
1910 /* A value object used with INTERNALVAR_VALUE. */
1911 struct value *value;
1912
1913 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1914 struct
1915 {
1916 /* The functions to call. */
1917 const struct internalvar_funcs *functions;
1918
1919 /* The function's user-data. */
1920 void *data;
1921 } make_value;
1922
1923 /* The internal function used with INTERNALVAR_FUNCTION. */
1924 struct
1925 {
1926 struct internal_function *function;
1927 /* True if this is the canonical name for the function. */
1928 int canonical;
1929 } fn;
1930
1931 /* An integer value used with INTERNALVAR_INTEGER. */
1932 struct
1933 {
1934 /* If type is non-NULL, it will be used as the type to generate
1935 a value for this internal variable. If type is NULL, a default
1936 integer type for the architecture is used. */
1937 struct type *type;
1938 LONGEST val;
1939 } integer;
1940
1941 /* A string value used with INTERNALVAR_STRING. */
1942 char *string;
1943 };
1944
1945 /* Internal variables. These are variables within the debugger
1946 that hold values assigned by debugger commands.
1947 The user refers to them with a '$' prefix
1948 that does not appear in the variable names stored internally. */
1949
1950 struct internalvar
1951 {
1952 struct internalvar *next;
1953 char *name;
1954
1955 /* We support various different kinds of content of an internal variable.
1956 enum internalvar_kind specifies the kind, and union internalvar_data
1957 provides the data associated with this particular kind. */
1958
1959 enum internalvar_kind kind;
1960
1961 union internalvar_data u;
1962 };
1963
1964 static struct internalvar *internalvars;
1965
1966 /* If the variable does not already exist create it and give it the
1967 value given. If no value is given then the default is zero. */
1968 static void
1969 init_if_undefined_command (const char* args, int from_tty)
1970 {
1971 struct internalvar* intvar;
1972
1973 /* Parse the expression - this is taken from set_command(). */
1974 expression_up expr = parse_expression (args);
1975
1976 /* Validate the expression.
1977 Was the expression an assignment?
1978 Or even an expression at all? */
1979 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1980 error (_("Init-if-undefined requires an assignment expression."));
1981
1982 /* Extract the variable from the parsed expression.
1983 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1984 if (expr->elts[1].opcode != OP_INTERNALVAR)
1985 error (_("The first parameter to init-if-undefined "
1986 "should be a GDB variable."));
1987 intvar = expr->elts[2].internalvar;
1988
1989 /* Only evaluate the expression if the lvalue is void.
1990 This may still fail if the expresssion is invalid. */
1991 if (intvar->kind == INTERNALVAR_VOID)
1992 evaluate_expression (expr.get ());
1993 }
1994
1995
1996 /* Look up an internal variable with name NAME. NAME should not
1997 normally include a dollar sign.
1998
1999 If the specified internal variable does not exist,
2000 the return value is NULL. */
2001
2002 struct internalvar *
2003 lookup_only_internalvar (const char *name)
2004 {
2005 struct internalvar *var;
2006
2007 for (var = internalvars; var; var = var->next)
2008 if (strcmp (var->name, name) == 0)
2009 return var;
2010
2011 return NULL;
2012 }
2013
2014 /* Complete NAME by comparing it to the names of internal
2015 variables. */
2016
2017 void
2018 complete_internalvar (completion_tracker &tracker, const char *name)
2019 {
2020 struct internalvar *var;
2021 int len;
2022
2023 len = strlen (name);
2024
2025 for (var = internalvars; var; var = var->next)
2026 if (strncmp (var->name, name, len) == 0)
2027 {
2028 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2029
2030 tracker.add_completion (std::move (copy));
2031 }
2032 }
2033
2034 /* Create an internal variable with name NAME and with a void value.
2035 NAME should not normally include a dollar sign. */
2036
2037 struct internalvar *
2038 create_internalvar (const char *name)
2039 {
2040 struct internalvar *var = XNEW (struct internalvar);
2041
2042 var->name = concat (name, (char *)NULL);
2043 var->kind = INTERNALVAR_VOID;
2044 var->next = internalvars;
2045 internalvars = var;
2046 return var;
2047 }
2048
2049 /* Create an internal variable with name NAME and register FUN as the
2050 function that value_of_internalvar uses to create a value whenever
2051 this variable is referenced. NAME should not normally include a
2052 dollar sign. DATA is passed uninterpreted to FUN when it is
2053 called. CLEANUP, if not NULL, is called when the internal variable
2054 is destroyed. It is passed DATA as its only argument. */
2055
2056 struct internalvar *
2057 create_internalvar_type_lazy (const char *name,
2058 const struct internalvar_funcs *funcs,
2059 void *data)
2060 {
2061 struct internalvar *var = create_internalvar (name);
2062
2063 var->kind = INTERNALVAR_MAKE_VALUE;
2064 var->u.make_value.functions = funcs;
2065 var->u.make_value.data = data;
2066 return var;
2067 }
2068
2069 /* See documentation in value.h. */
2070
2071 int
2072 compile_internalvar_to_ax (struct internalvar *var,
2073 struct agent_expr *expr,
2074 struct axs_value *value)
2075 {
2076 if (var->kind != INTERNALVAR_MAKE_VALUE
2077 || var->u.make_value.functions->compile_to_ax == NULL)
2078 return 0;
2079
2080 var->u.make_value.functions->compile_to_ax (var, expr, value,
2081 var->u.make_value.data);
2082 return 1;
2083 }
2084
2085 /* Look up an internal variable with name NAME. NAME should not
2086 normally include a dollar sign.
2087
2088 If the specified internal variable does not exist,
2089 one is created, with a void value. */
2090
2091 struct internalvar *
2092 lookup_internalvar (const char *name)
2093 {
2094 struct internalvar *var;
2095
2096 var = lookup_only_internalvar (name);
2097 if (var)
2098 return var;
2099
2100 return create_internalvar (name);
2101 }
2102
2103 /* Return current value of internal variable VAR. For variables that
2104 are not inherently typed, use a value type appropriate for GDBARCH. */
2105
2106 struct value *
2107 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2108 {
2109 struct value *val;
2110 struct trace_state_variable *tsv;
2111
2112 /* If there is a trace state variable of the same name, assume that
2113 is what we really want to see. */
2114 tsv = find_trace_state_variable (var->name);
2115 if (tsv)
2116 {
2117 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2118 &(tsv->value));
2119 if (tsv->value_known)
2120 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2121 tsv->value);
2122 else
2123 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2124 return val;
2125 }
2126
2127 switch (var->kind)
2128 {
2129 case INTERNALVAR_VOID:
2130 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2131 break;
2132
2133 case INTERNALVAR_FUNCTION:
2134 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2135 break;
2136
2137 case INTERNALVAR_INTEGER:
2138 if (!var->u.integer.type)
2139 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2140 var->u.integer.val);
2141 else
2142 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2143 break;
2144
2145 case INTERNALVAR_STRING:
2146 val = value_cstring (var->u.string, strlen (var->u.string),
2147 builtin_type (gdbarch)->builtin_char);
2148 break;
2149
2150 case INTERNALVAR_VALUE:
2151 val = value_copy (var->u.value);
2152 if (value_lazy (val))
2153 value_fetch_lazy (val);
2154 break;
2155
2156 case INTERNALVAR_MAKE_VALUE:
2157 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2158 var->u.make_value.data);
2159 break;
2160
2161 default:
2162 internal_error (__FILE__, __LINE__, _("bad kind"));
2163 }
2164
2165 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2166 on this value go back to affect the original internal variable.
2167
2168 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2169 no underlying modifyable state in the internal variable.
2170
2171 Likewise, if the variable's value is a computed lvalue, we want
2172 references to it to produce another computed lvalue, where
2173 references and assignments actually operate through the
2174 computed value's functions.
2175
2176 This means that internal variables with computed values
2177 behave a little differently from other internal variables:
2178 assignments to them don't just replace the previous value
2179 altogether. At the moment, this seems like the behavior we
2180 want. */
2181
2182 if (var->kind != INTERNALVAR_MAKE_VALUE
2183 && val->lval != lval_computed)
2184 {
2185 VALUE_LVAL (val) = lval_internalvar;
2186 VALUE_INTERNALVAR (val) = var;
2187 }
2188
2189 return val;
2190 }
2191
2192 int
2193 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2194 {
2195 if (var->kind == INTERNALVAR_INTEGER)
2196 {
2197 *result = var->u.integer.val;
2198 return 1;
2199 }
2200
2201 if (var->kind == INTERNALVAR_VALUE)
2202 {
2203 struct type *type = check_typedef (value_type (var->u.value));
2204
2205 if (TYPE_CODE (type) == TYPE_CODE_INT)
2206 {
2207 *result = value_as_long (var->u.value);
2208 return 1;
2209 }
2210 }
2211
2212 return 0;
2213 }
2214
2215 static int
2216 get_internalvar_function (struct internalvar *var,
2217 struct internal_function **result)
2218 {
2219 switch (var->kind)
2220 {
2221 case INTERNALVAR_FUNCTION:
2222 *result = var->u.fn.function;
2223 return 1;
2224
2225 default:
2226 return 0;
2227 }
2228 }
2229
2230 void
2231 set_internalvar_component (struct internalvar *var,
2232 LONGEST offset, LONGEST bitpos,
2233 LONGEST bitsize, struct value *newval)
2234 {
2235 gdb_byte *addr;
2236 struct gdbarch *arch;
2237 int unit_size;
2238
2239 switch (var->kind)
2240 {
2241 case INTERNALVAR_VALUE:
2242 addr = value_contents_writeable (var->u.value);
2243 arch = get_value_arch (var->u.value);
2244 unit_size = gdbarch_addressable_memory_unit_size (arch);
2245
2246 if (bitsize)
2247 modify_field (value_type (var->u.value), addr + offset,
2248 value_as_long (newval), bitpos, bitsize);
2249 else
2250 memcpy (addr + offset * unit_size, value_contents (newval),
2251 TYPE_LENGTH (value_type (newval)));
2252 break;
2253
2254 default:
2255 /* We can never get a component of any other kind. */
2256 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2257 }
2258 }
2259
2260 void
2261 set_internalvar (struct internalvar *var, struct value *val)
2262 {
2263 enum internalvar_kind new_kind;
2264 union internalvar_data new_data = { 0 };
2265
2266 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2267 error (_("Cannot overwrite convenience function %s"), var->name);
2268
2269 /* Prepare new contents. */
2270 switch (TYPE_CODE (check_typedef (value_type (val))))
2271 {
2272 case TYPE_CODE_VOID:
2273 new_kind = INTERNALVAR_VOID;
2274 break;
2275
2276 case TYPE_CODE_INTERNAL_FUNCTION:
2277 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2278 new_kind = INTERNALVAR_FUNCTION;
2279 get_internalvar_function (VALUE_INTERNALVAR (val),
2280 &new_data.fn.function);
2281 /* Copies created here are never canonical. */
2282 break;
2283
2284 default:
2285 new_kind = INTERNALVAR_VALUE;
2286 new_data.value = value_copy (val);
2287 new_data.value->modifiable = 1;
2288
2289 /* Force the value to be fetched from the target now, to avoid problems
2290 later when this internalvar is referenced and the target is gone or
2291 has changed. */
2292 if (value_lazy (new_data.value))
2293 value_fetch_lazy (new_data.value);
2294
2295 /* Release the value from the value chain to prevent it from being
2296 deleted by free_all_values. From here on this function should not
2297 call error () until new_data is installed into the var->u to avoid
2298 leaking memory. */
2299 release_value (new_data.value).release ();
2300
2301 /* Internal variables which are created from values with a dynamic
2302 location don't need the location property of the origin anymore.
2303 The resolved dynamic location is used prior then any other address
2304 when accessing the value.
2305 If we keep it, we would still refer to the origin value.
2306 Remove the location property in case it exist. */
2307 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2308
2309 break;
2310 }
2311
2312 /* Clean up old contents. */
2313 clear_internalvar (var);
2314
2315 /* Switch over. */
2316 var->kind = new_kind;
2317 var->u = new_data;
2318 /* End code which must not call error(). */
2319 }
2320
2321 void
2322 set_internalvar_integer (struct internalvar *var, LONGEST l)
2323 {
2324 /* Clean up old contents. */
2325 clear_internalvar (var);
2326
2327 var->kind = INTERNALVAR_INTEGER;
2328 var->u.integer.type = NULL;
2329 var->u.integer.val = l;
2330 }
2331
2332 void
2333 set_internalvar_string (struct internalvar *var, const char *string)
2334 {
2335 /* Clean up old contents. */
2336 clear_internalvar (var);
2337
2338 var->kind = INTERNALVAR_STRING;
2339 var->u.string = xstrdup (string);
2340 }
2341
2342 static void
2343 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2344 {
2345 /* Clean up old contents. */
2346 clear_internalvar (var);
2347
2348 var->kind = INTERNALVAR_FUNCTION;
2349 var->u.fn.function = f;
2350 var->u.fn.canonical = 1;
2351 /* Variables installed here are always the canonical version. */
2352 }
2353
2354 void
2355 clear_internalvar (struct internalvar *var)
2356 {
2357 /* Clean up old contents. */
2358 switch (var->kind)
2359 {
2360 case INTERNALVAR_VALUE:
2361 value_decref (var->u.value);
2362 break;
2363
2364 case INTERNALVAR_STRING:
2365 xfree (var->u.string);
2366 break;
2367
2368 case INTERNALVAR_MAKE_VALUE:
2369 if (var->u.make_value.functions->destroy != NULL)
2370 var->u.make_value.functions->destroy (var->u.make_value.data);
2371 break;
2372
2373 default:
2374 break;
2375 }
2376
2377 /* Reset to void kind. */
2378 var->kind = INTERNALVAR_VOID;
2379 }
2380
2381 char *
2382 internalvar_name (const struct internalvar *var)
2383 {
2384 return var->name;
2385 }
2386
2387 static struct internal_function *
2388 create_internal_function (const char *name,
2389 internal_function_fn handler, void *cookie)
2390 {
2391 struct internal_function *ifn = XNEW (struct internal_function);
2392
2393 ifn->name = xstrdup (name);
2394 ifn->handler = handler;
2395 ifn->cookie = cookie;
2396 return ifn;
2397 }
2398
2399 char *
2400 value_internal_function_name (struct value *val)
2401 {
2402 struct internal_function *ifn;
2403 int result;
2404
2405 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2406 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2407 gdb_assert (result);
2408
2409 return ifn->name;
2410 }
2411
2412 struct value *
2413 call_internal_function (struct gdbarch *gdbarch,
2414 const struct language_defn *language,
2415 struct value *func, int argc, struct value **argv)
2416 {
2417 struct internal_function *ifn;
2418 int result;
2419
2420 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2421 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2422 gdb_assert (result);
2423
2424 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2425 }
2426
2427 /* The 'function' command. This does nothing -- it is just a
2428 placeholder to let "help function NAME" work. This is also used as
2429 the implementation of the sub-command that is created when
2430 registering an internal function. */
2431 static void
2432 function_command (const char *command, int from_tty)
2433 {
2434 /* Do nothing. */
2435 }
2436
2437 /* Clean up if an internal function's command is destroyed. */
2438 static void
2439 function_destroyer (struct cmd_list_element *self, void *ignore)
2440 {
2441 xfree ((char *) self->name);
2442 xfree ((char *) self->doc);
2443 }
2444
2445 /* Add a new internal function. NAME is the name of the function; DOC
2446 is a documentation string describing the function. HANDLER is
2447 called when the function is invoked. COOKIE is an arbitrary
2448 pointer which is passed to HANDLER and is intended for "user
2449 data". */
2450 void
2451 add_internal_function (const char *name, const char *doc,
2452 internal_function_fn handler, void *cookie)
2453 {
2454 struct cmd_list_element *cmd;
2455 struct internal_function *ifn;
2456 struct internalvar *var = lookup_internalvar (name);
2457
2458 ifn = create_internal_function (name, handler, cookie);
2459 set_internalvar_function (var, ifn);
2460
2461 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2462 &functionlist);
2463 cmd->destroyer = function_destroyer;
2464 }
2465
2466 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2467 prevent cycles / duplicates. */
2468
2469 void
2470 preserve_one_value (struct value *value, struct objfile *objfile,
2471 htab_t copied_types)
2472 {
2473 if (TYPE_OBJFILE (value->type) == objfile)
2474 value->type = copy_type_recursive (objfile, value->type, copied_types);
2475
2476 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2477 value->enclosing_type = copy_type_recursive (objfile,
2478 value->enclosing_type,
2479 copied_types);
2480 }
2481
2482 /* Likewise for internal variable VAR. */
2483
2484 static void
2485 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2486 htab_t copied_types)
2487 {
2488 switch (var->kind)
2489 {
2490 case INTERNALVAR_INTEGER:
2491 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2492 var->u.integer.type
2493 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2494 break;
2495
2496 case INTERNALVAR_VALUE:
2497 preserve_one_value (var->u.value, objfile, copied_types);
2498 break;
2499 }
2500 }
2501
2502 /* Update the internal variables and value history when OBJFILE is
2503 discarded; we must copy the types out of the objfile. New global types
2504 will be created for every convenience variable which currently points to
2505 this objfile's types, and the convenience variables will be adjusted to
2506 use the new global types. */
2507
2508 void
2509 preserve_values (struct objfile *objfile)
2510 {
2511 htab_t copied_types;
2512 struct internalvar *var;
2513 int i;
2514
2515 /* Create the hash table. We allocate on the objfile's obstack, since
2516 it is soon to be deleted. */
2517 copied_types = create_copied_types_hash (objfile);
2518
2519 for (const value_ref_ptr &item : value_history)
2520 preserve_one_value (item.get (), objfile, copied_types);
2521
2522 for (var = internalvars; var; var = var->next)
2523 preserve_one_internalvar (var, objfile, copied_types);
2524
2525 preserve_ext_lang_values (objfile, copied_types);
2526
2527 htab_delete (copied_types);
2528 }
2529
2530 static void
2531 show_convenience (const char *ignore, int from_tty)
2532 {
2533 struct gdbarch *gdbarch = get_current_arch ();
2534 struct internalvar *var;
2535 int varseen = 0;
2536 struct value_print_options opts;
2537
2538 get_user_print_options (&opts);
2539 for (var = internalvars; var; var = var->next)
2540 {
2541
2542 if (!varseen)
2543 {
2544 varseen = 1;
2545 }
2546 printf_filtered (("$%s = "), var->name);
2547
2548 TRY
2549 {
2550 struct value *val;
2551
2552 val = value_of_internalvar (gdbarch, var);
2553 value_print (val, gdb_stdout, &opts);
2554 }
2555 CATCH (ex, RETURN_MASK_ERROR)
2556 {
2557 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2558 }
2559 END_CATCH
2560
2561 printf_filtered (("\n"));
2562 }
2563 if (!varseen)
2564 {
2565 /* This text does not mention convenience functions on purpose.
2566 The user can't create them except via Python, and if Python support
2567 is installed this message will never be printed ($_streq will
2568 exist). */
2569 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2570 "Convenience variables have "
2571 "names starting with \"$\";\n"
2572 "use \"set\" as in \"set "
2573 "$foo = 5\" to define them.\n"));
2574 }
2575 }
2576 \f
2577
2578 /* See value.h. */
2579
2580 struct value *
2581 value_from_xmethod (xmethod_worker_up &&worker)
2582 {
2583 struct value *v;
2584
2585 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2586 v->lval = lval_xcallable;
2587 v->location.xm_worker = worker.release ();
2588 v->modifiable = 0;
2589
2590 return v;
2591 }
2592
2593 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2594
2595 struct type *
2596 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2597 {
2598 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2599 && method->lval == lval_xcallable && argc > 0);
2600
2601 return method->location.xm_worker->get_result_type
2602 (argv[0], argv + 1, argc - 1);
2603 }
2604
2605 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2606
2607 struct value *
2608 call_xmethod (struct value *method, int argc, struct value **argv)
2609 {
2610 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2611 && method->lval == lval_xcallable && argc > 0);
2612
2613 return method->location.xm_worker->invoke (argv[0], argv + 1, argc - 1);
2614 }
2615 \f
2616 /* Extract a value as a C number (either long or double).
2617 Knows how to convert fixed values to double, or
2618 floating values to long.
2619 Does not deallocate the value. */
2620
2621 LONGEST
2622 value_as_long (struct value *val)
2623 {
2624 /* This coerces arrays and functions, which is necessary (e.g.
2625 in disassemble_command). It also dereferences references, which
2626 I suspect is the most logical thing to do. */
2627 val = coerce_array (val);
2628 return unpack_long (value_type (val), value_contents (val));
2629 }
2630
2631 /* Extract a value as a C pointer. Does not deallocate the value.
2632 Note that val's type may not actually be a pointer; value_as_long
2633 handles all the cases. */
2634 CORE_ADDR
2635 value_as_address (struct value *val)
2636 {
2637 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2638
2639 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2640 whether we want this to be true eventually. */
2641 #if 0
2642 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2643 non-address (e.g. argument to "signal", "info break", etc.), or
2644 for pointers to char, in which the low bits *are* significant. */
2645 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2646 #else
2647
2648 /* There are several targets (IA-64, PowerPC, and others) which
2649 don't represent pointers to functions as simply the address of
2650 the function's entry point. For example, on the IA-64, a
2651 function pointer points to a two-word descriptor, generated by
2652 the linker, which contains the function's entry point, and the
2653 value the IA-64 "global pointer" register should have --- to
2654 support position-independent code. The linker generates
2655 descriptors only for those functions whose addresses are taken.
2656
2657 On such targets, it's difficult for GDB to convert an arbitrary
2658 function address into a function pointer; it has to either find
2659 an existing descriptor for that function, or call malloc and
2660 build its own. On some targets, it is impossible for GDB to
2661 build a descriptor at all: the descriptor must contain a jump
2662 instruction; data memory cannot be executed; and code memory
2663 cannot be modified.
2664
2665 Upon entry to this function, if VAL is a value of type `function'
2666 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2667 value_address (val) is the address of the function. This is what
2668 you'll get if you evaluate an expression like `main'. The call
2669 to COERCE_ARRAY below actually does all the usual unary
2670 conversions, which includes converting values of type `function'
2671 to `pointer to function'. This is the challenging conversion
2672 discussed above. Then, `unpack_long' will convert that pointer
2673 back into an address.
2674
2675 So, suppose the user types `disassemble foo' on an architecture
2676 with a strange function pointer representation, on which GDB
2677 cannot build its own descriptors, and suppose further that `foo'
2678 has no linker-built descriptor. The address->pointer conversion
2679 will signal an error and prevent the command from running, even
2680 though the next step would have been to convert the pointer
2681 directly back into the same address.
2682
2683 The following shortcut avoids this whole mess. If VAL is a
2684 function, just return its address directly. */
2685 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2686 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2687 return value_address (val);
2688
2689 val = coerce_array (val);
2690
2691 /* Some architectures (e.g. Harvard), map instruction and data
2692 addresses onto a single large unified address space. For
2693 instance: An architecture may consider a large integer in the
2694 range 0x10000000 .. 0x1000ffff to already represent a data
2695 addresses (hence not need a pointer to address conversion) while
2696 a small integer would still need to be converted integer to
2697 pointer to address. Just assume such architectures handle all
2698 integer conversions in a single function. */
2699
2700 /* JimB writes:
2701
2702 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2703 must admonish GDB hackers to make sure its behavior matches the
2704 compiler's, whenever possible.
2705
2706 In general, I think GDB should evaluate expressions the same way
2707 the compiler does. When the user copies an expression out of
2708 their source code and hands it to a `print' command, they should
2709 get the same value the compiler would have computed. Any
2710 deviation from this rule can cause major confusion and annoyance,
2711 and needs to be justified carefully. In other words, GDB doesn't
2712 really have the freedom to do these conversions in clever and
2713 useful ways.
2714
2715 AndrewC pointed out that users aren't complaining about how GDB
2716 casts integers to pointers; they are complaining that they can't
2717 take an address from a disassembly listing and give it to `x/i'.
2718 This is certainly important.
2719
2720 Adding an architecture method like integer_to_address() certainly
2721 makes it possible for GDB to "get it right" in all circumstances
2722 --- the target has complete control over how things get done, so
2723 people can Do The Right Thing for their target without breaking
2724 anyone else. The standard doesn't specify how integers get
2725 converted to pointers; usually, the ABI doesn't either, but
2726 ABI-specific code is a more reasonable place to handle it. */
2727
2728 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2729 && !TYPE_IS_REFERENCE (value_type (val))
2730 && gdbarch_integer_to_address_p (gdbarch))
2731 return gdbarch_integer_to_address (gdbarch, value_type (val),
2732 value_contents (val));
2733
2734 return unpack_long (value_type (val), value_contents (val));
2735 #endif
2736 }
2737 \f
2738 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2739 as a long, or as a double, assuming the raw data is described
2740 by type TYPE. Knows how to convert different sizes of values
2741 and can convert between fixed and floating point. We don't assume
2742 any alignment for the raw data. Return value is in host byte order.
2743
2744 If you want functions and arrays to be coerced to pointers, and
2745 references to be dereferenced, call value_as_long() instead.
2746
2747 C++: It is assumed that the front-end has taken care of
2748 all matters concerning pointers to members. A pointer
2749 to member which reaches here is considered to be equivalent
2750 to an INT (or some size). After all, it is only an offset. */
2751
2752 LONGEST
2753 unpack_long (struct type *type, const gdb_byte *valaddr)
2754 {
2755 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2756 enum type_code code = TYPE_CODE (type);
2757 int len = TYPE_LENGTH (type);
2758 int nosign = TYPE_UNSIGNED (type);
2759
2760 switch (code)
2761 {
2762 case TYPE_CODE_TYPEDEF:
2763 return unpack_long (check_typedef (type), valaddr);
2764 case TYPE_CODE_ENUM:
2765 case TYPE_CODE_FLAGS:
2766 case TYPE_CODE_BOOL:
2767 case TYPE_CODE_INT:
2768 case TYPE_CODE_CHAR:
2769 case TYPE_CODE_RANGE:
2770 case TYPE_CODE_MEMBERPTR:
2771 if (nosign)
2772 return extract_unsigned_integer (valaddr, len, byte_order);
2773 else
2774 return extract_signed_integer (valaddr, len, byte_order);
2775
2776 case TYPE_CODE_FLT:
2777 case TYPE_CODE_DECFLOAT:
2778 return target_float_to_longest (valaddr, type);
2779
2780 case TYPE_CODE_PTR:
2781 case TYPE_CODE_REF:
2782 case TYPE_CODE_RVALUE_REF:
2783 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2784 whether we want this to be true eventually. */
2785 return extract_typed_address (valaddr, type);
2786
2787 default:
2788 error (_("Value can't be converted to integer."));
2789 }
2790 return 0; /* Placate lint. */
2791 }
2792
2793 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2794 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2795 We don't assume any alignment for the raw data. Return value is in
2796 host byte order.
2797
2798 If you want functions and arrays to be coerced to pointers, and
2799 references to be dereferenced, call value_as_address() instead.
2800
2801 C++: It is assumed that the front-end has taken care of
2802 all matters concerning pointers to members. A pointer
2803 to member which reaches here is considered to be equivalent
2804 to an INT (or some size). After all, it is only an offset. */
2805
2806 CORE_ADDR
2807 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2808 {
2809 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2810 whether we want this to be true eventually. */
2811 return unpack_long (type, valaddr);
2812 }
2813
2814 bool
2815 is_floating_value (struct value *val)
2816 {
2817 struct type *type = check_typedef (value_type (val));
2818
2819 if (is_floating_type (type))
2820 {
2821 if (!target_float_is_valid (value_contents (val), type))
2822 error (_("Invalid floating value found in program."));
2823 return true;
2824 }
2825
2826 return false;
2827 }
2828
2829 \f
2830 /* Get the value of the FIELDNO'th field (which must be static) of
2831 TYPE. */
2832
2833 struct value *
2834 value_static_field (struct type *type, int fieldno)
2835 {
2836 struct value *retval;
2837
2838 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2839 {
2840 case FIELD_LOC_KIND_PHYSADDR:
2841 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2842 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2843 break;
2844 case FIELD_LOC_KIND_PHYSNAME:
2845 {
2846 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2847 /* TYPE_FIELD_NAME (type, fieldno); */
2848 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2849
2850 if (sym.symbol == NULL)
2851 {
2852 /* With some compilers, e.g. HP aCC, static data members are
2853 reported as non-debuggable symbols. */
2854 struct bound_minimal_symbol msym
2855 = lookup_minimal_symbol (phys_name, NULL, NULL);
2856 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2857
2858 if (!msym.minsym)
2859 retval = allocate_optimized_out_value (field_type);
2860 else
2861 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2862 }
2863 else
2864 retval = value_of_variable (sym.symbol, sym.block);
2865 break;
2866 }
2867 default:
2868 gdb_assert_not_reached ("unexpected field location kind");
2869 }
2870
2871 return retval;
2872 }
2873
2874 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2875 You have to be careful here, since the size of the data area for the value
2876 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2877 than the old enclosing type, you have to allocate more space for the
2878 data. */
2879
2880 void
2881 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2882 {
2883 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2884 {
2885 check_type_length_before_alloc (new_encl_type);
2886 val->contents
2887 = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2888 }
2889
2890 val->enclosing_type = new_encl_type;
2891 }
2892
2893 /* Given a value ARG1 (offset by OFFSET bytes)
2894 of a struct or union type ARG_TYPE,
2895 extract and return the value of one of its (non-static) fields.
2896 FIELDNO says which field. */
2897
2898 struct value *
2899 value_primitive_field (struct value *arg1, LONGEST offset,
2900 int fieldno, struct type *arg_type)
2901 {
2902 struct value *v;
2903 struct type *type;
2904 struct gdbarch *arch = get_value_arch (arg1);
2905 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2906
2907 arg_type = check_typedef (arg_type);
2908 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2909
2910 /* Call check_typedef on our type to make sure that, if TYPE
2911 is a TYPE_CODE_TYPEDEF, its length is set to the length
2912 of the target type instead of zero. However, we do not
2913 replace the typedef type by the target type, because we want
2914 to keep the typedef in order to be able to print the type
2915 description correctly. */
2916 check_typedef (type);
2917
2918 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2919 {
2920 /* Handle packed fields.
2921
2922 Create a new value for the bitfield, with bitpos and bitsize
2923 set. If possible, arrange offset and bitpos so that we can
2924 do a single aligned read of the size of the containing type.
2925 Otherwise, adjust offset to the byte containing the first
2926 bit. Assume that the address, offset, and embedded offset
2927 are sufficiently aligned. */
2928
2929 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2930 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2931
2932 v = allocate_value_lazy (type);
2933 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2934 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2935 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2936 v->bitpos = bitpos % container_bitsize;
2937 else
2938 v->bitpos = bitpos % 8;
2939 v->offset = (value_embedded_offset (arg1)
2940 + offset
2941 + (bitpos - v->bitpos) / 8);
2942 set_value_parent (v, arg1);
2943 if (!value_lazy (arg1))
2944 value_fetch_lazy (v);
2945 }
2946 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2947 {
2948 /* This field is actually a base subobject, so preserve the
2949 entire object's contents for later references to virtual
2950 bases, etc. */
2951 LONGEST boffset;
2952
2953 /* Lazy register values with offsets are not supported. */
2954 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2955 value_fetch_lazy (arg1);
2956
2957 /* We special case virtual inheritance here because this
2958 requires access to the contents, which we would rather avoid
2959 for references to ordinary fields of unavailable values. */
2960 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2961 boffset = baseclass_offset (arg_type, fieldno,
2962 value_contents (arg1),
2963 value_embedded_offset (arg1),
2964 value_address (arg1),
2965 arg1);
2966 else
2967 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2968
2969 if (value_lazy (arg1))
2970 v = allocate_value_lazy (value_enclosing_type (arg1));
2971 else
2972 {
2973 v = allocate_value (value_enclosing_type (arg1));
2974 value_contents_copy_raw (v, 0, arg1, 0,
2975 TYPE_LENGTH (value_enclosing_type (arg1)));
2976 }
2977 v->type = type;
2978 v->offset = value_offset (arg1);
2979 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2980 }
2981 else if (NULL != TYPE_DATA_LOCATION (type))
2982 {
2983 /* Field is a dynamic data member. */
2984
2985 gdb_assert (0 == offset);
2986 /* We expect an already resolved data location. */
2987 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
2988 /* For dynamic data types defer memory allocation
2989 until we actual access the value. */
2990 v = allocate_value_lazy (type);
2991 }
2992 else
2993 {
2994 /* Plain old data member */
2995 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
2996 / (HOST_CHAR_BIT * unit_size));
2997
2998 /* Lazy register values with offsets are not supported. */
2999 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3000 value_fetch_lazy (arg1);
3001
3002 if (value_lazy (arg1))
3003 v = allocate_value_lazy (type);
3004 else
3005 {
3006 v = allocate_value (type);
3007 value_contents_copy_raw (v, value_embedded_offset (v),
3008 arg1, value_embedded_offset (arg1) + offset,
3009 type_length_units (type));
3010 }
3011 v->offset = (value_offset (arg1) + offset
3012 + value_embedded_offset (arg1));
3013 }
3014 set_value_component_location (v, arg1);
3015 return v;
3016 }
3017
3018 /* Given a value ARG1 of a struct or union type,
3019 extract and return the value of one of its (non-static) fields.
3020 FIELDNO says which field. */
3021
3022 struct value *
3023 value_field (struct value *arg1, int fieldno)
3024 {
3025 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3026 }
3027
3028 /* Return a non-virtual function as a value.
3029 F is the list of member functions which contains the desired method.
3030 J is an index into F which provides the desired method.
3031
3032 We only use the symbol for its address, so be happy with either a
3033 full symbol or a minimal symbol. */
3034
3035 struct value *
3036 value_fn_field (struct value **arg1p, struct fn_field *f,
3037 int j, struct type *type,
3038 LONGEST offset)
3039 {
3040 struct value *v;
3041 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3042 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3043 struct symbol *sym;
3044 struct bound_minimal_symbol msym;
3045
3046 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3047 if (sym != NULL)
3048 {
3049 memset (&msym, 0, sizeof (msym));
3050 }
3051 else
3052 {
3053 gdb_assert (sym == NULL);
3054 msym = lookup_bound_minimal_symbol (physname);
3055 if (msym.minsym == NULL)
3056 return NULL;
3057 }
3058
3059 v = allocate_value (ftype);
3060 VALUE_LVAL (v) = lval_memory;
3061 if (sym)
3062 {
3063 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3064 }
3065 else
3066 {
3067 /* The minimal symbol might point to a function descriptor;
3068 resolve it to the actual code address instead. */
3069 struct objfile *objfile = msym.objfile;
3070 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3071
3072 set_value_address (v,
3073 gdbarch_convert_from_func_ptr_addr
3074 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3075 }
3076
3077 if (arg1p)
3078 {
3079 if (type != value_type (*arg1p))
3080 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3081 value_addr (*arg1p)));
3082
3083 /* Move the `this' pointer according to the offset.
3084 VALUE_OFFSET (*arg1p) += offset; */
3085 }
3086
3087 return v;
3088 }
3089
3090 \f
3091
3092 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3093 VALADDR, and store the result in *RESULT.
3094 The bitfield starts at BITPOS bits and contains BITSIZE bits; if
3095 BITSIZE is zero, then the length is taken from FIELD_TYPE.
3096
3097 Extracting bits depends on endianness of the machine. Compute the
3098 number of least significant bits to discard. For big endian machines,
3099 we compute the total number of bits in the anonymous object, subtract
3100 off the bit count from the MSB of the object to the MSB of the
3101 bitfield, then the size of the bitfield, which leaves the LSB discard
3102 count. For little endian machines, the discard count is simply the
3103 number of bits from the LSB of the anonymous object to the LSB of the
3104 bitfield.
3105
3106 If the field is signed, we also do sign extension. */
3107
3108 static LONGEST
3109 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3110 LONGEST bitpos, LONGEST bitsize)
3111 {
3112 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3113 ULONGEST val;
3114 ULONGEST valmask;
3115 int lsbcount;
3116 LONGEST bytes_read;
3117 LONGEST read_offset;
3118
3119 /* Read the minimum number of bytes required; there may not be
3120 enough bytes to read an entire ULONGEST. */
3121 field_type = check_typedef (field_type);
3122 if (bitsize)
3123 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3124 else
3125 {
3126 bytes_read = TYPE_LENGTH (field_type);
3127 bitsize = 8 * bytes_read;
3128 }
3129
3130 read_offset = bitpos / 8;
3131
3132 val = extract_unsigned_integer (valaddr + read_offset,
3133 bytes_read, byte_order);
3134
3135 /* Extract bits. See comment above. */
3136
3137 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3138 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3139 else
3140 lsbcount = (bitpos % 8);
3141 val >>= lsbcount;
3142
3143 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3144 If the field is signed, and is negative, then sign extend. */
3145
3146 if (bitsize < 8 * (int) sizeof (val))
3147 {
3148 valmask = (((ULONGEST) 1) << bitsize) - 1;
3149 val &= valmask;
3150 if (!TYPE_UNSIGNED (field_type))
3151 {
3152 if (val & (valmask ^ (valmask >> 1)))
3153 {
3154 val |= ~valmask;
3155 }
3156 }
3157 }
3158
3159 return val;
3160 }
3161
3162 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3163 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3164 ORIGINAL_VALUE, which must not be NULL. See
3165 unpack_value_bits_as_long for more details. */
3166
3167 int
3168 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3169 LONGEST embedded_offset, int fieldno,
3170 const struct value *val, LONGEST *result)
3171 {
3172 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3173 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3174 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3175 int bit_offset;
3176
3177 gdb_assert (val != NULL);
3178
3179 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3180 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3181 || !value_bits_available (val, bit_offset, bitsize))
3182 return 0;
3183
3184 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3185 bitpos, bitsize);
3186 return 1;
3187 }
3188
3189 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3190 object at VALADDR. See unpack_bits_as_long for more details. */
3191
3192 LONGEST
3193 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3194 {
3195 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3196 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3197 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3198
3199 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3200 }
3201
3202 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3203 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3204 the contents in DEST_VAL, zero or sign extending if the type of
3205 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3206 VAL. If the VAL's contents required to extract the bitfield from
3207 are unavailable/optimized out, DEST_VAL is correspondingly
3208 marked unavailable/optimized out. */
3209
3210 void
3211 unpack_value_bitfield (struct value *dest_val,
3212 LONGEST bitpos, LONGEST bitsize,
3213 const gdb_byte *valaddr, LONGEST embedded_offset,
3214 const struct value *val)
3215 {
3216 enum bfd_endian byte_order;
3217 int src_bit_offset;
3218 int dst_bit_offset;
3219 struct type *field_type = value_type (dest_val);
3220
3221 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3222
3223 /* First, unpack and sign extend the bitfield as if it was wholly
3224 valid. Optimized out/unavailable bits are read as zero, but
3225 that's OK, as they'll end up marked below. If the VAL is
3226 wholly-invalid we may have skipped allocating its contents,
3227 though. See allocate_optimized_out_value. */
3228 if (valaddr != NULL)
3229 {
3230 LONGEST num;
3231
3232 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3233 bitpos, bitsize);
3234 store_signed_integer (value_contents_raw (dest_val),
3235 TYPE_LENGTH (field_type), byte_order, num);
3236 }
3237
3238 /* Now copy the optimized out / unavailability ranges to the right
3239 bits. */
3240 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3241 if (byte_order == BFD_ENDIAN_BIG)
3242 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3243 else
3244 dst_bit_offset = 0;
3245 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3246 val, src_bit_offset, bitsize);
3247 }
3248
3249 /* Return a new value with type TYPE, which is FIELDNO field of the
3250 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3251 of VAL. If the VAL's contents required to extract the bitfield
3252 from are unavailable/optimized out, the new value is
3253 correspondingly marked unavailable/optimized out. */
3254
3255 struct value *
3256 value_field_bitfield (struct type *type, int fieldno,
3257 const gdb_byte *valaddr,
3258 LONGEST embedded_offset, const struct value *val)
3259 {
3260 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3261 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3262 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3263
3264 unpack_value_bitfield (res_val, bitpos, bitsize,
3265 valaddr, embedded_offset, val);
3266
3267 return res_val;
3268 }
3269
3270 /* Modify the value of a bitfield. ADDR points to a block of memory in
3271 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3272 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3273 indicate which bits (in target bit order) comprise the bitfield.
3274 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3275 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3276
3277 void
3278 modify_field (struct type *type, gdb_byte *addr,
3279 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3280 {
3281 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3282 ULONGEST oword;
3283 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3284 LONGEST bytesize;
3285
3286 /* Normalize BITPOS. */
3287 addr += bitpos / 8;
3288 bitpos %= 8;
3289
3290 /* If a negative fieldval fits in the field in question, chop
3291 off the sign extension bits. */
3292 if ((~fieldval & ~(mask >> 1)) == 0)
3293 fieldval &= mask;
3294
3295 /* Warn if value is too big to fit in the field in question. */
3296 if (0 != (fieldval & ~mask))
3297 {
3298 /* FIXME: would like to include fieldval in the message, but
3299 we don't have a sprintf_longest. */
3300 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3301
3302 /* Truncate it, otherwise adjoining fields may be corrupted. */
3303 fieldval &= mask;
3304 }
3305
3306 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3307 false valgrind reports. */
3308
3309 bytesize = (bitpos + bitsize + 7) / 8;
3310 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3311
3312 /* Shifting for bit field depends on endianness of the target machine. */
3313 if (gdbarch_bits_big_endian (get_type_arch (type)))
3314 bitpos = bytesize * 8 - bitpos - bitsize;
3315
3316 oword &= ~(mask << bitpos);
3317 oword |= fieldval << bitpos;
3318
3319 store_unsigned_integer (addr, bytesize, byte_order, oword);
3320 }
3321 \f
3322 /* Pack NUM into BUF using a target format of TYPE. */
3323
3324 void
3325 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3326 {
3327 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3328 LONGEST len;
3329
3330 type = check_typedef (type);
3331 len = TYPE_LENGTH (type);
3332
3333 switch (TYPE_CODE (type))
3334 {
3335 case TYPE_CODE_INT:
3336 case TYPE_CODE_CHAR:
3337 case TYPE_CODE_ENUM:
3338 case TYPE_CODE_FLAGS:
3339 case TYPE_CODE_BOOL:
3340 case TYPE_CODE_RANGE:
3341 case TYPE_CODE_MEMBERPTR:
3342 store_signed_integer (buf, len, byte_order, num);
3343 break;
3344
3345 case TYPE_CODE_REF:
3346 case TYPE_CODE_RVALUE_REF:
3347 case TYPE_CODE_PTR:
3348 store_typed_address (buf, type, (CORE_ADDR) num);
3349 break;
3350
3351 case TYPE_CODE_FLT:
3352 case TYPE_CODE_DECFLOAT:
3353 target_float_from_longest (buf, type, num);
3354 break;
3355
3356 default:
3357 error (_("Unexpected type (%d) encountered for integer constant."),
3358 TYPE_CODE (type));
3359 }
3360 }
3361
3362
3363 /* Pack NUM into BUF using a target format of TYPE. */
3364
3365 static void
3366 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3367 {
3368 LONGEST len;
3369 enum bfd_endian byte_order;
3370
3371 type = check_typedef (type);
3372 len = TYPE_LENGTH (type);
3373 byte_order = gdbarch_byte_order (get_type_arch (type));
3374
3375 switch (TYPE_CODE (type))
3376 {
3377 case TYPE_CODE_INT:
3378 case TYPE_CODE_CHAR:
3379 case TYPE_CODE_ENUM:
3380 case TYPE_CODE_FLAGS:
3381 case TYPE_CODE_BOOL:
3382 case TYPE_CODE_RANGE:
3383 case TYPE_CODE_MEMBERPTR:
3384 store_unsigned_integer (buf, len, byte_order, num);
3385 break;
3386
3387 case TYPE_CODE_REF:
3388 case TYPE_CODE_RVALUE_REF:
3389 case TYPE_CODE_PTR:
3390 store_typed_address (buf, type, (CORE_ADDR) num);
3391 break;
3392
3393 case TYPE_CODE_FLT:
3394 case TYPE_CODE_DECFLOAT:
3395 target_float_from_ulongest (buf, type, num);
3396 break;
3397
3398 default:
3399 error (_("Unexpected type (%d) encountered "
3400 "for unsigned integer constant."),
3401 TYPE_CODE (type));
3402 }
3403 }
3404
3405
3406 /* Convert C numbers into newly allocated values. */
3407
3408 struct value *
3409 value_from_longest (struct type *type, LONGEST num)
3410 {
3411 struct value *val = allocate_value (type);
3412
3413 pack_long (value_contents_raw (val), type, num);
3414 return val;
3415 }
3416
3417
3418 /* Convert C unsigned numbers into newly allocated values. */
3419
3420 struct value *
3421 value_from_ulongest (struct type *type, ULONGEST num)
3422 {
3423 struct value *val = allocate_value (type);
3424
3425 pack_unsigned_long (value_contents_raw (val), type, num);
3426
3427 return val;
3428 }
3429
3430
3431 /* Create a value representing a pointer of type TYPE to the address
3432 ADDR. */
3433
3434 struct value *
3435 value_from_pointer (struct type *type, CORE_ADDR addr)
3436 {
3437 struct value *val = allocate_value (type);
3438
3439 store_typed_address (value_contents_raw (val),
3440 check_typedef (type), addr);
3441 return val;
3442 }
3443
3444
3445 /* Create a value of type TYPE whose contents come from VALADDR, if it
3446 is non-null, and whose memory address (in the inferior) is
3447 ADDRESS. The type of the created value may differ from the passed
3448 type TYPE. Make sure to retrieve values new type after this call.
3449 Note that TYPE is not passed through resolve_dynamic_type; this is
3450 a special API intended for use only by Ada. */
3451
3452 struct value *
3453 value_from_contents_and_address_unresolved (struct type *type,
3454 const gdb_byte *valaddr,
3455 CORE_ADDR address)
3456 {
3457 struct value *v;
3458
3459 if (valaddr == NULL)
3460 v = allocate_value_lazy (type);
3461 else
3462 v = value_from_contents (type, valaddr);
3463 VALUE_LVAL (v) = lval_memory;
3464 set_value_address (v, address);
3465 return v;
3466 }
3467
3468 /* Create a value of type TYPE whose contents come from VALADDR, if it
3469 is non-null, and whose memory address (in the inferior) is
3470 ADDRESS. The type of the created value may differ from the passed
3471 type TYPE. Make sure to retrieve values new type after this call. */
3472
3473 struct value *
3474 value_from_contents_and_address (struct type *type,
3475 const gdb_byte *valaddr,
3476 CORE_ADDR address)
3477 {
3478 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3479 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3480 struct value *v;
3481
3482 if (valaddr == NULL)
3483 v = allocate_value_lazy (resolved_type);
3484 else
3485 v = value_from_contents (resolved_type, valaddr);
3486 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3487 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3488 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3489 VALUE_LVAL (v) = lval_memory;
3490 set_value_address (v, address);
3491 return v;
3492 }
3493
3494 /* Create a value of type TYPE holding the contents CONTENTS.
3495 The new value is `not_lval'. */
3496
3497 struct value *
3498 value_from_contents (struct type *type, const gdb_byte *contents)
3499 {
3500 struct value *result;
3501
3502 result = allocate_value (type);
3503 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3504 return result;
3505 }
3506
3507 /* Extract a value from the history file. Input will be of the form
3508 $digits or $$digits. See block comment above 'write_dollar_variable'
3509 for details. */
3510
3511 struct value *
3512 value_from_history_ref (const char *h, const char **endp)
3513 {
3514 int index, len;
3515
3516 if (h[0] == '$')
3517 len = 1;
3518 else
3519 return NULL;
3520
3521 if (h[1] == '$')
3522 len = 2;
3523
3524 /* Find length of numeral string. */
3525 for (; isdigit (h[len]); len++)
3526 ;
3527
3528 /* Make sure numeral string is not part of an identifier. */
3529 if (h[len] == '_' || isalpha (h[len]))
3530 return NULL;
3531
3532 /* Now collect the index value. */
3533 if (h[1] == '$')
3534 {
3535 if (len == 2)
3536 {
3537 /* For some bizarre reason, "$$" is equivalent to "$$1",
3538 rather than to "$$0" as it ought to be! */
3539 index = -1;
3540 *endp += len;
3541 }
3542 else
3543 {
3544 char *local_end;
3545
3546 index = -strtol (&h[2], &local_end, 10);
3547 *endp = local_end;
3548 }
3549 }
3550 else
3551 {
3552 if (len == 1)
3553 {
3554 /* "$" is equivalent to "$0". */
3555 index = 0;
3556 *endp += len;
3557 }
3558 else
3559 {
3560 char *local_end;
3561
3562 index = strtol (&h[1], &local_end, 10);
3563 *endp = local_end;
3564 }
3565 }
3566
3567 return access_value_history (index);
3568 }
3569
3570 /* Get the component value (offset by OFFSET bytes) of a struct or
3571 union WHOLE. Component's type is TYPE. */
3572
3573 struct value *
3574 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3575 {
3576 struct value *v;
3577
3578 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3579 v = allocate_value_lazy (type);
3580 else
3581 {
3582 v = allocate_value (type);
3583 value_contents_copy (v, value_embedded_offset (v),
3584 whole, value_embedded_offset (whole) + offset,
3585 type_length_units (type));
3586 }
3587 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3588 set_value_component_location (v, whole);
3589
3590 return v;
3591 }
3592
3593 struct value *
3594 coerce_ref_if_computed (const struct value *arg)
3595 {
3596 const struct lval_funcs *funcs;
3597
3598 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3599 return NULL;
3600
3601 if (value_lval_const (arg) != lval_computed)
3602 return NULL;
3603
3604 funcs = value_computed_funcs (arg);
3605 if (funcs->coerce_ref == NULL)
3606 return NULL;
3607
3608 return funcs->coerce_ref (arg);
3609 }
3610
3611 /* Look at value.h for description. */
3612
3613 struct value *
3614 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3615 const struct type *original_type,
3616 const struct value *original_value)
3617 {
3618 /* Re-adjust type. */
3619 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3620
3621 /* Add embedding info. */
3622 set_value_enclosing_type (value, enc_type);
3623 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3624
3625 /* We may be pointing to an object of some derived type. */
3626 return value_full_object (value, NULL, 0, 0, 0);
3627 }
3628
3629 struct value *
3630 coerce_ref (struct value *arg)
3631 {
3632 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3633 struct value *retval;
3634 struct type *enc_type;
3635
3636 retval = coerce_ref_if_computed (arg);
3637 if (retval)
3638 return retval;
3639
3640 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3641 return arg;
3642
3643 enc_type = check_typedef (value_enclosing_type (arg));
3644 enc_type = TYPE_TARGET_TYPE (enc_type);
3645
3646 retval = value_at_lazy (enc_type,
3647 unpack_pointer (value_type (arg),
3648 value_contents (arg)));
3649 enc_type = value_type (retval);
3650 return readjust_indirect_value_type (retval, enc_type,
3651 value_type_arg_tmp, arg);
3652 }
3653
3654 struct value *
3655 coerce_array (struct value *arg)
3656 {
3657 struct type *type;
3658
3659 arg = coerce_ref (arg);
3660 type = check_typedef (value_type (arg));
3661
3662 switch (TYPE_CODE (type))
3663 {
3664 case TYPE_CODE_ARRAY:
3665 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3666 arg = value_coerce_array (arg);
3667 break;
3668 case TYPE_CODE_FUNC:
3669 arg = value_coerce_function (arg);
3670 break;
3671 }
3672 return arg;
3673 }
3674 \f
3675
3676 /* Return the return value convention that will be used for the
3677 specified type. */
3678
3679 enum return_value_convention
3680 struct_return_convention (struct gdbarch *gdbarch,
3681 struct value *function, struct type *value_type)
3682 {
3683 enum type_code code = TYPE_CODE (value_type);
3684
3685 if (code == TYPE_CODE_ERROR)
3686 error (_("Function return type unknown."));
3687
3688 /* Probe the architecture for the return-value convention. */
3689 return gdbarch_return_value (gdbarch, function, value_type,
3690 NULL, NULL, NULL);
3691 }
3692
3693 /* Return true if the function returning the specified type is using
3694 the convention of returning structures in memory (passing in the
3695 address as a hidden first parameter). */
3696
3697 int
3698 using_struct_return (struct gdbarch *gdbarch,
3699 struct value *function, struct type *value_type)
3700 {
3701 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3702 /* A void return value is never in memory. See also corresponding
3703 code in "print_return_value". */
3704 return 0;
3705
3706 return (struct_return_convention (gdbarch, function, value_type)
3707 != RETURN_VALUE_REGISTER_CONVENTION);
3708 }
3709
3710 /* Set the initialized field in a value struct. */
3711
3712 void
3713 set_value_initialized (struct value *val, int status)
3714 {
3715 val->initialized = status;
3716 }
3717
3718 /* Return the initialized field in a value struct. */
3719
3720 int
3721 value_initialized (const struct value *val)
3722 {
3723 return val->initialized;
3724 }
3725
3726 /* Load the actual content of a lazy value. Fetch the data from the
3727 user's process and clear the lazy flag to indicate that the data in
3728 the buffer is valid.
3729
3730 If the value is zero-length, we avoid calling read_memory, which
3731 would abort. We mark the value as fetched anyway -- all 0 bytes of
3732 it. */
3733
3734 void
3735 value_fetch_lazy (struct value *val)
3736 {
3737 gdb_assert (value_lazy (val));
3738 allocate_value_contents (val);
3739 /* A value is either lazy, or fully fetched. The
3740 availability/validity is only established as we try to fetch a
3741 value. */
3742 gdb_assert (VEC_empty (range_s, val->optimized_out));
3743 gdb_assert (VEC_empty (range_s, val->unavailable));
3744 if (value_bitsize (val))
3745 {
3746 /* To read a lazy bitfield, read the entire enclosing value. This
3747 prevents reading the same block of (possibly volatile) memory once
3748 per bitfield. It would be even better to read only the containing
3749 word, but we have no way to record that just specific bits of a
3750 value have been fetched. */
3751 struct type *type = check_typedef (value_type (val));
3752 struct value *parent = value_parent (val);
3753
3754 if (value_lazy (parent))
3755 value_fetch_lazy (parent);
3756
3757 unpack_value_bitfield (val,
3758 value_bitpos (val), value_bitsize (val),
3759 value_contents_for_printing (parent),
3760 value_offset (val), parent);
3761 }
3762 else if (VALUE_LVAL (val) == lval_memory)
3763 {
3764 CORE_ADDR addr = value_address (val);
3765 struct type *type = check_typedef (value_enclosing_type (val));
3766
3767 if (TYPE_LENGTH (type))
3768 read_value_memory (val, 0, value_stack (val),
3769 addr, value_contents_all_raw (val),
3770 type_length_units (type));
3771 }
3772 else if (VALUE_LVAL (val) == lval_register)
3773 {
3774 struct frame_info *next_frame;
3775 int regnum;
3776 struct type *type = check_typedef (value_type (val));
3777 struct value *new_val = val, *mark = value_mark ();
3778
3779 /* Offsets are not supported here; lazy register values must
3780 refer to the entire register. */
3781 gdb_assert (value_offset (val) == 0);
3782
3783 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3784 {
3785 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3786
3787 next_frame = frame_find_by_id (next_frame_id);
3788 regnum = VALUE_REGNUM (new_val);
3789
3790 gdb_assert (next_frame != NULL);
3791
3792 /* Convertible register routines are used for multi-register
3793 values and for interpretation in different types
3794 (e.g. float or int from a double register). Lazy
3795 register values should have the register's natural type,
3796 so they do not apply. */
3797 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3798 regnum, type));
3799
3800 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3801 Since a "->next" operation was performed when setting
3802 this field, we do not need to perform a "next" operation
3803 again when unwinding the register. That's why
3804 frame_unwind_register_value() is called here instead of
3805 get_frame_register_value(). */
3806 new_val = frame_unwind_register_value (next_frame, regnum);
3807
3808 /* If we get another lazy lval_register value, it means the
3809 register is found by reading it from NEXT_FRAME's next frame.
3810 frame_unwind_register_value should never return a value with
3811 the frame id pointing to NEXT_FRAME. If it does, it means we
3812 either have two consecutive frames with the same frame id
3813 in the frame chain, or some code is trying to unwind
3814 behind get_prev_frame's back (e.g., a frame unwind
3815 sniffer trying to unwind), bypassing its validations. In
3816 any case, it should always be an internal error to end up
3817 in this situation. */
3818 if (VALUE_LVAL (new_val) == lval_register
3819 && value_lazy (new_val)
3820 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3821 internal_error (__FILE__, __LINE__,
3822 _("infinite loop while fetching a register"));
3823 }
3824
3825 /* If it's still lazy (for instance, a saved register on the
3826 stack), fetch it. */
3827 if (value_lazy (new_val))
3828 value_fetch_lazy (new_val);
3829
3830 /* Copy the contents and the unavailability/optimized-out
3831 meta-data from NEW_VAL to VAL. */
3832 set_value_lazy (val, 0);
3833 value_contents_copy (val, value_embedded_offset (val),
3834 new_val, value_embedded_offset (new_val),
3835 type_length_units (type));
3836
3837 if (frame_debug)
3838 {
3839 struct gdbarch *gdbarch;
3840 struct frame_info *frame;
3841 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3842 so that the frame level will be shown correctly. */
3843 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3844 regnum = VALUE_REGNUM (val);
3845 gdbarch = get_frame_arch (frame);
3846
3847 fprintf_unfiltered (gdb_stdlog,
3848 "{ value_fetch_lazy "
3849 "(frame=%d,regnum=%d(%s),...) ",
3850 frame_relative_level (frame), regnum,
3851 user_reg_map_regnum_to_name (gdbarch, regnum));
3852
3853 fprintf_unfiltered (gdb_stdlog, "->");
3854 if (value_optimized_out (new_val))
3855 {
3856 fprintf_unfiltered (gdb_stdlog, " ");
3857 val_print_optimized_out (new_val, gdb_stdlog);
3858 }
3859 else
3860 {
3861 int i;
3862 const gdb_byte *buf = value_contents (new_val);
3863
3864 if (VALUE_LVAL (new_val) == lval_register)
3865 fprintf_unfiltered (gdb_stdlog, " register=%d",
3866 VALUE_REGNUM (new_val));
3867 else if (VALUE_LVAL (new_val) == lval_memory)
3868 fprintf_unfiltered (gdb_stdlog, " address=%s",
3869 paddress (gdbarch,
3870 value_address (new_val)));
3871 else
3872 fprintf_unfiltered (gdb_stdlog, " computed");
3873
3874 fprintf_unfiltered (gdb_stdlog, " bytes=");
3875 fprintf_unfiltered (gdb_stdlog, "[");
3876 for (i = 0; i < register_size (gdbarch, regnum); i++)
3877 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3878 fprintf_unfiltered (gdb_stdlog, "]");
3879 }
3880
3881 fprintf_unfiltered (gdb_stdlog, " }\n");
3882 }
3883
3884 /* Dispose of the intermediate values. This prevents
3885 watchpoints from trying to watch the saved frame pointer. */
3886 value_free_to_mark (mark);
3887 }
3888 else if (VALUE_LVAL (val) == lval_computed
3889 && value_computed_funcs (val)->read != NULL)
3890 value_computed_funcs (val)->read (val);
3891 else
3892 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3893
3894 set_value_lazy (val, 0);
3895 }
3896
3897 /* Implementation of the convenience function $_isvoid. */
3898
3899 static struct value *
3900 isvoid_internal_fn (struct gdbarch *gdbarch,
3901 const struct language_defn *language,
3902 void *cookie, int argc, struct value **argv)
3903 {
3904 int ret;
3905
3906 if (argc != 1)
3907 error (_("You must provide one argument for $_isvoid."));
3908
3909 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3910
3911 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3912 }
3913
3914 void
3915 _initialize_values (void)
3916 {
3917 add_cmd ("convenience", no_class, show_convenience, _("\
3918 Debugger convenience (\"$foo\") variables and functions.\n\
3919 Convenience variables are created when you assign them values;\n\
3920 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3921 \n\
3922 A few convenience variables are given values automatically:\n\
3923 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3924 \"$__\" holds the contents of the last address examined with \"x\"."
3925 #ifdef HAVE_PYTHON
3926 "\n\n\
3927 Convenience functions are defined via the Python API."
3928 #endif
3929 ), &showlist);
3930 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3931
3932 add_cmd ("values", no_set_class, show_values, _("\
3933 Elements of value history around item number IDX (or last ten)."),
3934 &showlist);
3935
3936 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3937 Initialize a convenience variable if necessary.\n\
3938 init-if-undefined VARIABLE = EXPRESSION\n\
3939 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3940 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3941 VARIABLE is already initialized."));
3942
3943 add_prefix_cmd ("function", no_class, function_command, _("\
3944 Placeholder command for showing help on convenience functions."),
3945 &functionlist, "function ", 0, &cmdlist);
3946
3947 add_internal_function ("_isvoid", _("\
3948 Check whether an expression is void.\n\
3949 Usage: $_isvoid (expression)\n\
3950 Return 1 if the expression is void, zero otherwise."),
3951 isvoid_internal_fn, NULL);
3952
3953 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
3954 class_support, &max_value_size, _("\
3955 Set maximum sized value gdb will load from the inferior."), _("\
3956 Show maximum sized value gdb will load from the inferior."), _("\
3957 Use this to control the maximum size, in bytes, of a value that gdb\n\
3958 will load from the inferior. Setting this value to 'unlimited'\n\
3959 disables checking.\n\
3960 Setting this does not invalidate already allocated values, it only\n\
3961 prevents future values, larger than this size, from being allocated."),
3962 set_max_value_size,
3963 show_max_value_size,
3964 &setlist, &showlist);
3965 }
This page took 0.108994 seconds and 4 git commands to generate.