1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47 #include "expop.h"
48
49 /* Definition of a user function. */
50 struct internal_function
51 {
52 /* The name of the function. It is a bit odd to have this in the
53 function itself -- the user might use a differently-named
54 convenience variable to hold the function. */
55 char *name;
56
57 /* The handler. */
58 internal_function_fn handler;
59
60 /* User data for the handler. */
61 void *cookie;
62 };
63
64 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
65
66 struct range
67 {
68 /* Lowest offset in the range. */
69 LONGEST offset;
70
71 /* Length of the range. */
72 LONGEST length;
73
74 /* Returns true if THIS is strictly less than OTHER, useful for
75 searching. We keep ranges sorted by offset and coalesce
76 overlapping and contiguous ranges, so this just compares the
77 starting offset. */
78
79 bool operator< (const range &other) const
80 {
81 return offset < other.offset;
82 }
83
84 /* Returns true if THIS is equal to OTHER. */
85 bool operator== (const range &other) const
86 {
87 return offset == other.offset && length == other.length;
88 }
89 };
90
91 /* Returns true if the ranges defined by [offset1, offset1+len1) and
92 [offset2, offset2+len2) overlap. */
93
94 static int
95 ranges_overlap (LONGEST offset1, LONGEST len1,
96 LONGEST offset2, LONGEST len2)
97 {
98 ULONGEST h, l;
99
100 l = std::max (offset1, offset2);
101 h = std::min (offset1 + len1, offset2 + len2);
102 return (l < h);
103 }
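
/* Illustrative sketch (editor's example, not part of the original
   sources): ranges are half-open, so two ranges that merely touch do
   not overlap:

     ranges_overlap (0, 4, 4, 4);   // [0, 4) vs [4, 8) => 0
     ranges_overlap (0, 5, 4, 4);   // [0, 5) vs [4, 8) => 1  */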
104
105 /* Returns true if RANGES contains any range that overlaps [OFFSET,
106 OFFSET+LENGTH). */
107
108 static int
109 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
110 LONGEST length)
111 {
112 range what;
113
114 what.offset = offset;
115 what.length = length;
116
117 /* We keep ranges sorted by offset and coalesce overlapping and
118 contiguous ranges, so to check if a range list contains a given
119 range, we can do a binary search for the position the given range
120 would be inserted if we only considered the starting OFFSET of
121 ranges. We call that position I. Since we also have LENGTH to
122 care for (this is a range after all), we need to check if the
123 _previous_ range overlaps the I range. E.g.,
124
125 R
126 |---|
127 |---| |---| |------| ... |--|
128 0 1 2 N
129
130 I=1
131
132 In the case above, the binary search would return `I=1', meaning,
133 this OFFSET should be inserted at position 1, and the current
134 position 1 should be pushed further (and become 2). But, `0'
135 overlaps with R.
136
137 Then we also need to check whether the range at position I itself overlaps R.
138 E.g.,
139
140 R
141 |---|
142 |---| |---| |-------| ... |--|
143 0 1 2 N
144
145 I=1
146 */
147
148
149 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
150
151 if (i > ranges.begin ())
152 {
153 const struct range &bef = *(i - 1);
154
155 if (ranges_overlap (bef.offset, bef.length, offset, length))
156 return 1;
157 }
158
159 if (i < ranges.end ())
160 {
161 const struct range &r = *i;
162
163 if (ranges_overlap (r.offset, r.length, offset, length))
164 return 1;
165 }
166
167 return 0;
168 }
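
/* Illustrative sketch (editor's example, not part of the original
   sources): with a sorted, coalesced vector holding the bit ranges
   [0, 4) and [8, 12), a query hits only if it intersects one of
   them:

     std::vector<range> v = { { 0, 4 }, { 8, 4 } };
     ranges_contain (v, 2, 4);   // => 1, overlaps [0, 4)
     ranges_contain (v, 4, 4);   // => 0, falls in the gap [4, 8)  */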
169
170 static struct cmd_list_element *functionlist;
171
172 /* Note that the fields in this structure are arranged to save a bit
173 of memory. */
174
175 struct value
176 {
177 explicit value (struct type *type_)
178 : modifiable (1),
179 lazy (1),
180 initialized (1),
181 stack (0),
182 type (type_),
183 enclosing_type (type_)
184 {
185 }
186
187 ~value ()
188 {
189 if (VALUE_LVAL (this) == lval_computed)
190 {
191 const struct lval_funcs *funcs = location.computed.funcs;
192
193 if (funcs->free_closure)
194 funcs->free_closure (this);
195 }
196 else if (VALUE_LVAL (this) == lval_xcallable)
197 delete location.xm_worker;
198 }
199
200 DISABLE_COPY_AND_ASSIGN (value);
201
202 /* Type of value; either not an lval, or one of the various
203 different possible kinds of lval. */
204 enum lval_type lval = not_lval;
205
206 /* Is it modifiable? Only relevant if lval != not_lval. */
207 unsigned int modifiable : 1;
208
209 /* If zero, contents of this value are in the contents field. If
210 nonzero, contents are in inferior. If the lval field is lval_memory,
211 the contents are in inferior memory at location.address plus offset.
212 The lval field may also be lval_register.
213
214 WARNING: This field is used by the code which handles watchpoints
215 (see breakpoint.c) to decide whether a particular value can be
216 watched by hardware watchpoints. If the lazy flag is set for
217 some member of a value chain, it is assumed that this member of
218 the chain doesn't need to be watched as part of watching the
219 value itself. This is how GDB avoids watching the entire struct
220 or array when the user wants to watch a single struct member or
221 array element. If you ever change the way lazy flag is set and
222 reset, be sure to consider this use as well! */
223 unsigned int lazy : 1;
224
225 /* If value is a variable, whether it is initialized or not. */
226 unsigned int initialized : 1;
227
228 /* If value is from the stack. If this is set, read_stack will be
229 used instead of read_memory to enable extra caching. */
230 unsigned int stack : 1;
231
232 /* Location of value (if lval). */
233 union
234 {
235 /* If lval == lval_memory, this is the address in the inferior. */
236 CORE_ADDR address;
237
238 /* If lval == lval_register, the value is from a register. */
239 struct
240 {
241 /* Register number. */
242 int regnum;
243 /* Frame ID of "next" frame to which a register value is relative.
244 If the register value is found relative to frame F, then the
245 frame id of F->next will be stored in next_frame_id. */
246 struct frame_id next_frame_id;
247 } reg;
248
249 /* Pointer to internal variable. */
250 struct internalvar *internalvar;
251
252 /* Pointer to xmethod worker. */
253 struct xmethod_worker *xm_worker;
254
255 /* If lval == lval_computed, this is a set of function pointers
256 to use to access and describe the value, and a closure pointer
257 for them to use. */
258 struct
259 {
260 /* Functions to call. */
261 const struct lval_funcs *funcs;
262
263 /* Closure for those functions to use. */
264 void *closure;
265 } computed;
266 } location {};
267
268 /* Describes offset of a value within lval of a structure in target
269 addressable memory units. Note also the member embedded_offset
270 below. */
271 LONGEST offset = 0;
272
273 /* Only used for bitfields; number of bits contained in them. */
274 LONGEST bitsize = 0;
275
276 /* Only used for bitfields; position of start of field. For
277 little-endian targets, it is the position of the LSB. For
278 big-endian targets, it is the position of the MSB. */
279 LONGEST bitpos = 0;
280
281 /* The number of references to this value. When a value is created,
282 the value chain holds a reference, so REFERENCE_COUNT is 1. If
283 release_value is called, this value is removed from the chain but
284 the caller of release_value now has a reference to this value.
285 The caller must arrange for a call to value_decref later. */
286 int reference_count = 1;
287
288 /* Only used for bitfields; the containing value. This allows a
289 single read from the target when displaying multiple
290 bitfields. */
291 value_ref_ptr parent;
292
293 /* Type of the value. */
294 struct type *type;
295
296 /* If a value represents a C++ object, then the `type' field gives
297 the object's compile-time type. If the object actually belongs
298 to some class derived from `type', perhaps with other base
299 classes and additional members, then `type' is just a subobject
300 of the real thing, and the full object is probably larger than
301 `type' would suggest.
302
303 If `type' is a dynamic class (i.e. one with a vtable), then GDB
304 can actually determine the object's run-time type by looking at
305 the run-time type information in the vtable. When this
306 information is available, we may elect to read in the entire
307 object, for several reasons:
308
309 - When printing the value, the user would probably rather see the
310 full object, not just the limited portion apparent from the
311 compile-time type.
312
313 - If `type' has virtual base classes, then even printing `type'
314 alone may require reaching outside the `type' portion of the
315 object to wherever the virtual base class has been stored.
316
317 When we store the entire object, `enclosing_type' is the run-time
318 type -- the complete object -- and `embedded_offset' is the
319 offset of `type' within that larger type, in target addressable memory
320 units. The value_contents() function takes `embedded_offset' into account,
321 so most GDB code continues to see the `type' portion of the value, just
322 as the inferior would.
323
324 If `type' is a pointer to an object, then `enclosing_type' is a
325 pointer to the object's run-time type, and `pointed_to_offset' is
326 the offset in target addressable memory units from the full object
327 to the pointed-to object -- that is, the value `embedded_offset' would
328 have if we followed the pointer and fetched the complete object.
329 (I don't really see the point. Why not just determine the
330 run-time type when you indirect, and avoid the special case? The
331 contents don't matter until you indirect anyway.)
332
333 If we're not doing anything fancy, `enclosing_type' is equal to
334 `type', and `embedded_offset' is zero, so everything works
335 normally. */
336 struct type *enclosing_type;
337 LONGEST embedded_offset = 0;
338 LONGEST pointed_to_offset = 0;
339
340 /* Actual contents of the value. Target byte-order. NULL or not
341 valid if lazy is nonzero. */
342 gdb::unique_xmalloc_ptr<gdb_byte> contents;
343
344 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
345 rather than available, since the common and default case is for a
346 value to be available. This is filled in at value read time.
347 The unavailable ranges are tracked in bits. Note that a contents
348 bit that has been optimized out doesn't really exist in the
349 program, so it can't be marked unavailable either. */
350 std::vector<range> unavailable;
351
352 /* Likewise, but for optimized out contents (a chunk of the value of
353 a variable that does not actually exist in the program). If LVAL
354 is lval_register, this is a register ($pc, $sp, etc., never a
355 program variable) that has not been saved in the frame. Not
356 saved registers and optimized-out program variables values are
357 treated pretty much the same, except not-saved registers have a
358 different string representation and related error strings. */
359 std::vector<range> optimized_out;
360 };
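
/* Illustrative sketch (editor's example, not part of the original
   sources) of the type / enclosing_type distinction described above,
   for a C++ inferior such as:

     struct Base { virtual ~Base (); int b; };
     struct Derived : Base { int d; };
     Base *p = new Derived;

   A value for `*p' has `type' == Base.  With "set print object on",
   GDB may read the whole Derived object; `enclosing_type' is then
   Derived and `embedded_offset' is the offset of the Base subobject
   within Derived, while value_contents () still presents only the
   Base portion.  */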
361
362 /* See value.h. */
363
364 struct gdbarch *
365 get_value_arch (const struct value *value)
366 {
367 return value_type (value)->arch ();
368 }
369
370 int
371 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
372 {
373 gdb_assert (!value->lazy);
374
375 return !ranges_contain (value->unavailable, offset, length);
376 }
377
378 int
379 value_bytes_available (const struct value *value,
380 LONGEST offset, LONGEST length)
381 {
382 return value_bits_available (value,
383 offset * TARGET_CHAR_BIT,
384 length * TARGET_CHAR_BIT);
385 }
386
387 int
388 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
389 {
390 gdb_assert (!value->lazy);
391
392 return ranges_contain (value->optimized_out, bit_offset, bit_length);
393 }
394
395 int
396 value_entirely_available (struct value *value)
397 {
398 /* We can only tell whether the whole value is available when we try
399 to read it. */
400 if (value->lazy)
401 value_fetch_lazy (value);
402
403 if (value->unavailable.empty ())
404 return 1;
405 return 0;
406 }
407
408 /* Returns true if VALUE is entirely covered by RANGES. If the value
409 is lazy, it'll be read now. Note that RANGES is typically one of
410 VALUE's own range vectors, which reading the value may modify. */
411
412 static int
413 value_entirely_covered_by_range_vector (struct value *value,
414 const std::vector<range> &ranges)
415 {
416 /* We can only tell whether the whole value is optimized out /
417 unavailable when we try to read it. */
418 if (value->lazy)
419 value_fetch_lazy (value);
420
421 if (ranges.size () == 1)
422 {
423 const struct range &t = ranges[0];
424
425 if (t.offset == 0
426 && t.length == (TARGET_CHAR_BIT
427 * TYPE_LENGTH (value_enclosing_type (value))))
428 return 1;
429 }
430
431 return 0;
432 }
433
434 int
435 value_entirely_unavailable (struct value *value)
436 {
437 return value_entirely_covered_by_range_vector (value, value->unavailable);
438 }
439
440 int
441 value_entirely_optimized_out (struct value *value)
442 {
443 return value_entirely_covered_by_range_vector (value, value->optimized_out);
444 }
445
446 /* Insert into the vector pointed to by VECTORP the bit range starting at
447 OFFSET bits, and extending for the next LENGTH bits. */
448
449 static void
450 insert_into_bit_range_vector (std::vector<range> *vectorp,
451 LONGEST offset, LONGEST length)
452 {
453 range newr;
454
455 /* Insert the range sorted. If there's overlap or the new range
456 would be contiguous with an existing range, merge. */
457
458 newr.offset = offset;
459 newr.length = length;
460
461 /* Do a binary search for the position the given range would be
462 inserted if we only considered the starting OFFSET of ranges.
463 Call that position I. Since we also have LENGTH to care for
464 (this is a range after all), we need to check if the _previous_
465 range overlaps the I range. E.g., calling R the new range:
466
467 #1 - overlaps with previous
468
469 R
470 |-...-|
471 |---| |---| |------| ... |--|
472 0 1 2 N
473
474 I=1
475
476 In the case #1 above, the binary search would return `I=1',
477 meaning, this OFFSET should be inserted at position 1, and the
478 current position 1 should be pushed further (and become 2). But,
479 note that `0' overlaps with R, so we want to merge them.
480
481 A similar consideration needs to be taken if the new range would
482 be contiguous with the previous range:
483
484 #2 - contiguous with previous
485
486 R
487 |-...-|
488 |--| |---| |------| ... |--|
489 0 1 2 N
490
491 I=1
492
493 If there's no overlap with the previous range, as in:
494
495 #3 - not overlapping and not contiguous
496
497 R
498 |-...-|
499 |--| |---| |------| ... |--|
500 0 1 2 N
501
502 I=1
503
504 or if I is 0:
505
506 #4 - R is the range with lowest offset
507
508 R
509 |-...-|
510 |--| |---| |------| ... |--|
511 0 1 2 N
512
513 I=0
514
515 ... we just push the new range to I.
516
517 All the 4 cases above need to consider that the new range may
518 also overlap several of the ranges that follow, or that R may be
519 contiguous with the following range, and merge. E.g.,
520
521 #5 - overlapping following ranges
522
523 R
524 |------------------------|
525 |--| |---| |------| ... |--|
526 0 1 2 N
527
528 I=0
529
530 or:
531
532 R
533 |-------|
534 |--| |---| |------| ... |--|
535 0 1 2 N
536
537 I=1
538
539 */
540
541 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
542 if (i > vectorp->begin ())
543 {
544 struct range &bef = *(i - 1);
545
546 if (ranges_overlap (bef.offset, bef.length, offset, length))
547 {
548 /* #1 */
549 ULONGEST l = std::min (bef.offset, offset);
550 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
551
552 bef.offset = l;
553 bef.length = h - l;
554 i--;
555 }
556 else if (offset == bef.offset + bef.length)
557 {
558 /* #2 */
559 bef.length += length;
560 i--;
561 }
562 else
563 {
564 /* #3 */
565 i = vectorp->insert (i, newr);
566 }
567 }
568 else
569 {
570 /* #4 */
571 i = vectorp->insert (i, newr);
572 }
573
574 /* Check whether the ranges following the one we've just added or
575 touched can be folded in (#5 above). */
576 if (i != vectorp->end () && i + 1 < vectorp->end ())
577 {
578 int removed = 0;
579 auto next = i + 1;
580
581 /* Get the range we just touched. */
582 struct range &t = *i;
583 removed = 0;
584
585 i = next;
586 for (; i < vectorp->end (); i++)
587 {
588 struct range &r = *i;
589 if (r.offset <= t.offset + t.length)
590 {
591 ULONGEST l, h;
592
593 l = std::min (t.offset, r.offset);
594 h = std::max (t.offset + t.length, r.offset + r.length);
595
596 t.offset = l;
597 t.length = h - l;
598
599 removed++;
600 }
601 else
602 {
603 /* If we couldn't merge this one, we won't be able to
604 merge following ones either, since the ranges are
605 always sorted by OFFSET. */
606 break;
607 }
608 }
609
610 if (removed != 0)
611 vectorp->erase (next, next + removed);
612 }
613 }
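
/* Illustrative sketch (editor's example, not part of the original
   sources): inserting a range that bridges two existing ranges
   coalesces them, as in case #5 above:

     std::vector<range> v = { { 0, 4 }, { 8, 4 } };
     insert_into_bit_range_vector (&v, 3, 6);
     // v now holds the single range { offset = 0, length = 12 }.  */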
614
615 void
616 mark_value_bits_unavailable (struct value *value,
617 LONGEST offset, LONGEST length)
618 {
619 insert_into_bit_range_vector (&value->unavailable, offset, length);
620 }
621
622 void
623 mark_value_bytes_unavailable (struct value *value,
624 LONGEST offset, LONGEST length)
625 {
626 mark_value_bits_unavailable (value,
627 offset * TARGET_CHAR_BIT,
628 length * TARGET_CHAR_BIT);
629 }
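
/* Illustrative sketch (editor's example, not part of the original
   sources), where `some_8_byte_type' stands for any 8-byte type:

     struct value *val = allocate_value (some_8_byte_type);
     mark_value_bytes_unavailable (val, 4, 4);
     value_bytes_available (val, 0, 4);    // => 1
     value_bytes_available (val, 4, 4);    // => 0
     value_entirely_unavailable (val);     // => 0, bytes [0, 4) remain  */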
630
631 /* Find the first range in RANGES that overlaps the range defined by
632 OFFSET and LENGTH, starting at element POS in the RANGES vector.
633 Returns the index into RANGES where such overlapping range was
634 found, or -1 if none was found. */
635
636 static int
637 find_first_range_overlap (const std::vector<range> *ranges, int pos,
638 LONGEST offset, LONGEST length)
639 {
640 int i;
641
642 for (i = pos; i < ranges->size (); i++)
643 {
644 const range &r = (*ranges)[i];
645 if (ranges_overlap (r.offset, r.length, offset, length))
646 return i;
647 }
648
649 return -1;
650 }
651
652 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
653 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
654 return non-zero.
655
656 It must always be the case that:
657 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
658
659 It is assumed that memory can be accessed from:
660 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
661 to:
662 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
663 / TARGET_CHAR_BIT) */
664 static int
665 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
666 const gdb_byte *ptr2, size_t offset2_bits,
667 size_t length_bits)
668 {
669 gdb_assert (offset1_bits % TARGET_CHAR_BIT
670 == offset2_bits % TARGET_CHAR_BIT);
671
672 if (offset1_bits % TARGET_CHAR_BIT != 0)
673 {
674 size_t bits;
675 gdb_byte mask, b1, b2;
676
677 /* The offset from the base pointers PTR1 and PTR2 is not a complete
678 number of bytes. A number of bits up to either the next exact
679 byte boundary, or LENGTH_BITS (whichever is sooner) will be
680 compared. */
681 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
682 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
683 mask = (1 << bits) - 1;
684
685 if (length_bits < bits)
686 {
687 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
688 bits = length_bits;
689 }
690
691 /* Now load the two bytes and mask off the bits we care about. */
692 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
693 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
694
695 if (b1 != b2)
696 return 1;
697
698 /* Now update the length and offsets to take account of the bits
699 we've just compared. */
700 length_bits -= bits;
701 offset1_bits += bits;
702 offset2_bits += bits;
703 }
704
705 if (length_bits % TARGET_CHAR_BIT != 0)
706 {
707 size_t bits;
708 size_t o1, o2;
709 gdb_byte mask, b1, b2;
710
711 /* The length is not an exact number of bytes. After the previous
712 IF block, the offsets are byte aligned, or the
713 length is zero (in which case this code is not reached). Compare
714 a number of bits at the end of the region, starting from an exact
715 byte boundary. */
716 bits = length_bits % TARGET_CHAR_BIT;
717 o1 = offset1_bits + length_bits - bits;
718 o2 = offset2_bits + length_bits - bits;
719
720 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
721 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
722
723 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
724 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
725
726 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
727 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
728
729 if (b1 != b2)
730 return 1;
731
732 length_bits -= bits;
733 }
734
735 if (length_bits > 0)
736 {
737 /* We've now taken care of any stray "bits" at the start, or end of
738 the region to compare, the remainder can be covered with a simple
739 memcmp. */
740 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
741 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
742 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
743
744 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
745 ptr2 + offset2_bits / TARGET_CHAR_BIT,
746 length_bits / TARGET_CHAR_BIT);
747 }
748
749 /* Length is zero, regions match. */
750 return 0;
751 }
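
/* Illustrative sketch (editor's example, not part of the original
   sources): both regions may start mid-byte, as long as the two bit
   offsets are congruent modulo TARGET_CHAR_BIT:

     gdb_byte a[] = { 0x12, 0x34 };
     gdb_byte b[] = { 0x12, 0x34 };
     memcmp_with_bit_offsets (a, 4, b, 4, 12);  // => 0, regions equal
     memcmp_with_bit_offsets (a, 0, b, 8, 8);   // compares a[0] with b[1],
                                                // => non-zero here  */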
752
753 /* Helper struct for find_first_range_overlap_and_match and
754 value_contents_bits_eq. Keep track of which slot of a given ranges
755 vector we last looked at. */
756
757 struct ranges_and_idx
758 {
759 /* The ranges. */
760 const std::vector<range> *ranges;
761
762 /* The range we've last found in RANGES. Given ranges are sorted,
763 we can start the next lookup here. */
764 int idx;
765 };
766
767 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
768 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
769 ranges starting at OFFSET2 bits. Return true if the ranges match
770 and fill in *L and *H with the overlapping window relative to
771 both OFFSET1 and OFFSET2. */
772
773 static int
774 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
775 struct ranges_and_idx *rp2,
776 LONGEST offset1, LONGEST offset2,
777 LONGEST length, ULONGEST *l, ULONGEST *h)
778 {
779 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
780 offset1, length);
781 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
782 offset2, length);
783
784 if (rp1->idx == -1 && rp2->idx == -1)
785 {
786 *l = length;
787 *h = length;
788 return 1;
789 }
790 else if (rp1->idx == -1 || rp2->idx == -1)
791 return 0;
792 else
793 {
794 const range *r1, *r2;
795 ULONGEST l1, h1;
796 ULONGEST l2, h2;
797
798 r1 = &(*rp1->ranges)[rp1->idx];
799 r2 = &(*rp2->ranges)[rp2->idx];
800
801 /* Get the unavailable windows intersected by the incoming
802 ranges. The first and last ranges that overlap the argument
803 range may be wider than the incoming argument range. */
804 l1 = std::max (offset1, r1->offset);
805 h1 = std::min (offset1 + length, r1->offset + r1->length);
806
807 l2 = std::max (offset2, r2->offset);
808 h2 = std::min (offset2 + length, r2->offset + r2->length);
809
810 /* Make them relative to the respective start offsets, so we can
811 compare them for equality. */
812 l1 -= offset1;
813 h1 -= offset1;
814
815 l2 -= offset2;
816 h2 -= offset2;
817
818 /* Different ranges, no match. */
819 if (l1 != l2 || h1 != h2)
820 return 0;
821
822 *h = h1;
823 *l = l1;
824 return 1;
825 }
826 }
827
828 /* Helper function for value_contents_eq. The only difference is that
829 this function is bit rather than byte based.
830
831 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
832 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
833 Return true if the available bits match. */
834
835 static bool
836 value_contents_bits_eq (const struct value *val1, int offset1,
837 const struct value *val2, int offset2,
838 int length)
839 {
840 /* Each array element corresponds to a ranges source (unavailable,
841 optimized out). '1' is for VAL1, '2' for VAL2. */
842 struct ranges_and_idx rp1[2], rp2[2];
843
844 /* See function description in value.h. */
845 gdb_assert (!val1->lazy && !val2->lazy);
846
847 /* We shouldn't be trying to compare past the end of the values. */
848 gdb_assert (offset1 + length
849 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
850 gdb_assert (offset2 + length
851 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
852
853 memset (&rp1, 0, sizeof (rp1));
854 memset (&rp2, 0, sizeof (rp2));
855 rp1[0].ranges = &val1->unavailable;
856 rp2[0].ranges = &val2->unavailable;
857 rp1[1].ranges = &val1->optimized_out;
858 rp2[1].ranges = &val2->optimized_out;
859
860 while (length > 0)
861 {
862 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
863 int i;
864
865 for (i = 0; i < 2; i++)
866 {
867 ULONGEST l_tmp, h_tmp;
868
869 /* The contents only compare equal if the invalid/unavailable
870 contents ranges match as well. */
871 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
872 offset1, offset2, length,
873 &l_tmp, &h_tmp))
874 return false;
875
876 /* We're interested in the lowest/first range found. */
877 if (i == 0 || l_tmp < l)
878 {
879 l = l_tmp;
880 h = h_tmp;
881 }
882 }
883
884 /* Compare the available/valid contents. */
885 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
886 val2->contents.get (), offset2, l) != 0)
887 return false;
888
889 length -= h;
890 offset1 += h;
891 offset2 += h;
892 }
893
894 return true;
895 }
896
897 bool
898 value_contents_eq (const struct value *val1, LONGEST offset1,
899 const struct value *val2, LONGEST offset2,
900 LONGEST length)
901 {
902 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
903 val2, offset2 * TARGET_CHAR_BIT,
904 length * TARGET_CHAR_BIT);
905 }
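
/* Illustrative sketch (editor's example, not part of the original
   sources), where `some_int_type' stands for any integer type and
   value_from_longest is declared in value.h:

     struct value *a = value_from_longest (some_int_type, 42);
     struct value *b = value_from_longest (some_int_type, 42);
     value_contents_eq (a, 0, b, 0, TYPE_LENGTH (some_int_type)); // => true
     mark_value_bytes_unavailable (a, 0, 1);
     value_contents_eq (a, 0, b, 0, TYPE_LENGTH (some_int_type)); // => false

   The second comparison fails because the availability metadata of A
   and B no longer matches, even though the raw bytes still do.  */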
906
907
908 /* The value-history records all the values printed by print commands
909 during this session. */
910
911 static std::vector<value_ref_ptr> value_history;
912
913 \f
914 /* List of all value objects currently allocated
915 (except for those released by calls to release_value)
916 This is so they can be freed after each command. */
917
918 static std::vector<value_ref_ptr> all_values;
919
920 /* Allocate a lazy value for type TYPE. Its actual content is
921 "lazily" allocated too: the content field of the return value is
922 NULL; it will be allocated when it is fetched from the target. */
923
924 struct value *
925 allocate_value_lazy (struct type *type)
926 {
927 struct value *val;
928
929 /* Call check_typedef on our type to make sure that, if TYPE
930 is a TYPE_CODE_TYPEDEF, its length is set to the length
931 of the target type instead of zero. However, we do not
932 replace the typedef type by the target type, because we want
933 to keep the typedef in order to be able to set the VAL's type
934 description correctly. */
935 check_typedef (type);
936
937 val = new struct value (type);
938
939 /* Values start out on the all_values chain. */
940 all_values.emplace_back (val);
941
942 return val;
943 }
944
945 /* The maximum size, in bytes, that GDB will try to allocate for a value.
946 The initial value of 64k was not selected for any specific reason; it is
947 just a reasonable starting point. */
948
949 static int max_value_size = 65536; /* 64k bytes */
950
951 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
952 LONGEST, otherwise GDB will not be able to parse integer values from the
953 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
954 be unable to parse "set max-value-size 2".
955
956 As we want a consistent GDB experience across hosts with different sizes
957 of LONGEST, this arbitrary minimum value was selected; so long as it
958 is bigger than LONGEST on all GDB-supported hosts we're fine.
959
960 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
961 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
962
963 /* Implement the "set max-value-size" command. */
964
965 static void
966 set_max_value_size (const char *args, int from_tty,
967 struct cmd_list_element *c)
968 {
969 gdb_assert (max_value_size == -1 || max_value_size >= 0);
970
971 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
972 {
973 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
974 error (_("max-value-size set too low, increasing to %d bytes"),
975 max_value_size);
976 }
977 }
978
979 /* Implement the "show max-value-size" command. */
980
981 static void
982 show_max_value_size (struct ui_file *file, int from_tty,
983 struct cmd_list_element *c, const char *value)
984 {
985 if (max_value_size == -1)
986 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
987 else
988 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
989 max_value_size);
990 }
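
/* Illustrative sketch (editor's example, not part of the original
   sources): the limit checked below is user-visible from the CLI:

     (gdb) show max-value-size
     Maximum value size is 65536 bytes.
     (gdb) set max-value-size 1024
     (gdb) set max-value-size unlimited  */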
991
992 /* Called before we attempt to allocate or reallocate a buffer for the
993 contents of a value. TYPE is the type of the value for which we are
994 allocating the buffer. If the buffer is too large (based on the user
995 controllable setting) then throw an error. If this function returns
996 then we should attempt to allocate the buffer. */
997
998 static void
999 check_type_length_before_alloc (const struct type *type)
1000 {
1001 ULONGEST length = TYPE_LENGTH (type);
1002
1003 if (max_value_size > -1 && length > max_value_size)
1004 {
1005 if (type->name () != NULL)
1006 error (_("value of type `%s' requires %s bytes, which is more "
1007 "than max-value-size"), type->name (), pulongest (length));
1008 else
1009 error (_("value requires %s bytes, which is more than "
1010 "max-value-size"), pulongest (length));
1011 }
1012 }
1013
1014 /* Allocate the contents of VAL if it has not been allocated yet. */
1015
1016 static void
1017 allocate_value_contents (struct value *val)
1018 {
1019 if (!val->contents)
1020 {
1021 check_type_length_before_alloc (val->enclosing_type);
1022 val->contents.reset
1023 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1024 }
1025 }
1026
1027 /* Allocate a value and its contents for type TYPE. */
1028
1029 struct value *
1030 allocate_value (struct type *type)
1031 {
1032 struct value *val = allocate_value_lazy (type);
1033
1034 allocate_value_contents (val);
1035 val->lazy = 0;
1036 return val;
1037 }
1038
1039 /* Allocate a value that has the correct length
1040 for COUNT repetitions of type TYPE. */
1041
1042 struct value *
1043 allocate_repeat_value (struct type *type, int count)
1044 {
1045 /* Despite the fact that we are really creating an array of TYPE here, we
1046 use the string lower bound as the array lower bound. This seems to
1047 work fine for now. */
1048 int low_bound = current_language->string_lower_bound ();
1049 /* FIXME-type-allocation: need a way to free this type when we are
1050 done with it. */
1051 struct type *array_type
1052 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1053
1054 return allocate_value (array_type);
1055 }
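
/* Illustrative sketch (editor's example, not part of the original
   sources): in a language whose arrays start at 1 (Fortran, say),
   allocate_repeat_value (elt_type, 10) yields a value whose type is
   a 10-element array indexed [1..10] and whose contents buffer is
   10 * TYPE_LENGTH (elt_type) bytes; `elt_type' stands for any
   element type.  */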
1056
1057 struct value *
1058 allocate_computed_value (struct type *type,
1059 const struct lval_funcs *funcs,
1060 void *closure)
1061 {
1062 struct value *v = allocate_value_lazy (type);
1063
1064 VALUE_LVAL (v) = lval_computed;
1065 v->location.computed.funcs = funcs;
1066 v->location.computed.closure = closure;
1067
1068 return v;
1069 }
1070
1071 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1072
1073 struct value *
1074 allocate_optimized_out_value (struct type *type)
1075 {
1076 struct value *retval = allocate_value_lazy (type);
1077
1078 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1079 set_value_lazy (retval, 0);
1080 return retval;
1081 }
1082
1083 /* Accessor methods. */
1084
1085 struct type *
1086 value_type (const struct value *value)
1087 {
1088 return value->type;
1089 }
1090 void
1091 deprecated_set_value_type (struct value *value, struct type *type)
1092 {
1093 value->type = type;
1094 }
1095
1096 LONGEST
1097 value_offset (const struct value *value)
1098 {
1099 return value->offset;
1100 }
1101 void
1102 set_value_offset (struct value *value, LONGEST offset)
1103 {
1104 value->offset = offset;
1105 }
1106
1107 LONGEST
1108 value_bitpos (const struct value *value)
1109 {
1110 return value->bitpos;
1111 }
1112 void
1113 set_value_bitpos (struct value *value, LONGEST bit)
1114 {
1115 value->bitpos = bit;
1116 }
1117
1118 LONGEST
1119 value_bitsize (const struct value *value)
1120 {
1121 return value->bitsize;
1122 }
1123 void
1124 set_value_bitsize (struct value *value, LONGEST bit)
1125 {
1126 value->bitsize = bit;
1127 }
1128
1129 struct value *
1130 value_parent (const struct value *value)
1131 {
1132 return value->parent.get ();
1133 }
1134
1135 /* See value.h. */
1136
1137 void
1138 set_value_parent (struct value *value, struct value *parent)
1139 {
1140 value->parent = value_ref_ptr::new_reference (parent);
1141 }
1142
1143 gdb_byte *
1144 value_contents_raw (struct value *value)
1145 {
1146 struct gdbarch *arch = get_value_arch (value);
1147 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1148
1149 allocate_value_contents (value);
1150 return value->contents.get () + value->embedded_offset * unit_size;
1151 }
1152
1153 gdb_byte *
1154 value_contents_all_raw (struct value *value)
1155 {
1156 allocate_value_contents (value);
1157 return value->contents.get ();
1158 }
1159
1160 struct type *
1161 value_enclosing_type (const struct value *value)
1162 {
1163 return value->enclosing_type;
1164 }
1165
1166 /* Look at value.h for description. */
1167
1168 struct type *
1169 value_actual_type (struct value *value, int resolve_simple_types,
1170 int *real_type_found)
1171 {
1172 struct value_print_options opts;
1173 struct type *result;
1174
1175 get_user_print_options (&opts);
1176
1177 if (real_type_found)
1178 *real_type_found = 0;
1179 result = value_type (value);
1180 if (opts.objectprint)
1181 {
1182 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1183 fetch its rtti type. */
1184 if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1185 && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1186 == TYPE_CODE_STRUCT)
1187 && !value_optimized_out (value))
1188 {
1189 struct type *real_type;
1190
1191 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1192 if (real_type)
1193 {
1194 if (real_type_found)
1195 *real_type_found = 1;
1196 result = real_type;
1197 }
1198 }
1199 else if (resolve_simple_types)
1200 {
1201 if (real_type_found)
1202 *real_type_found = 1;
1203 result = value_enclosing_type (value);
1204 }
1205 }
1206
1207 return result;
1208 }
1209
1210 void
1211 error_value_optimized_out (void)
1212 {
1213 error (_("value has been optimized out"));
1214 }
1215
1216 static void
1217 require_not_optimized_out (const struct value *value)
1218 {
1219 if (!value->optimized_out.empty ())
1220 {
1221 if (value->lval == lval_register)
1222 error (_("register has not been saved in frame"));
1223 else
1224 error_value_optimized_out ();
1225 }
1226 }
1227
1228 static void
1229 require_available (const struct value *value)
1230 {
1231 if (!value->unavailable.empty ())
1232 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1233 }
1234
1235 const gdb_byte *
1236 value_contents_for_printing (struct value *value)
1237 {
1238 if (value->lazy)
1239 value_fetch_lazy (value);
1240 return value->contents.get ();
1241 }
1242
1243 const gdb_byte *
1244 value_contents_for_printing_const (const struct value *value)
1245 {
1246 gdb_assert (!value->lazy);
1247 return value->contents.get ();
1248 }
1249
1250 const gdb_byte *
1251 value_contents_all (struct value *value)
1252 {
1253 const gdb_byte *result = value_contents_for_printing (value);
1254 require_not_optimized_out (value);
1255 require_available (value);
1256 return result;
1257 }
1258
1259 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1260 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1261
1262 static void
1263 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1264 const std::vector<range> &src_range, int src_bit_offset,
1265 int bit_length)
1266 {
1267 for (const range &r : src_range)
1268 {
1269 ULONGEST h, l;
1270
1271 l = std::max (r.offset, (LONGEST) src_bit_offset);
1272 h = std::min (r.offset + r.length,
1273 (LONGEST) src_bit_offset + bit_length);
1274
1275 if (l < h)
1276 insert_into_bit_range_vector (dst_range,
1277 dst_bit_offset + (l - src_bit_offset),
1278 h - l);
1279 }
1280 }
1281
1282 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1283 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1284
1285 static void
1286 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1287 const struct value *src, int src_bit_offset,
1288 int bit_length)
1289 {
1290 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1291 src->unavailable, src_bit_offset,
1292 bit_length);
1293 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1294 src->optimized_out, src_bit_offset,
1295 bit_length);
1296 }
1297
1298 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1299 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1300 contents, starting at DST_OFFSET. If unavailable contents are
1301 being copied from SRC, the corresponding DST contents are marked
1302 unavailable accordingly. Neither DST nor SRC may be lazy
1303 values.
1304
1305 It is assumed the contents of DST in the [DST_OFFSET,
1306 DST_OFFSET+LENGTH) range are wholly available. */
1307
1308 static void
1309 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1310 struct value *src, LONGEST src_offset, LONGEST length)
1311 {
1312 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1313 struct gdbarch *arch = get_value_arch (src);
1314 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1315
1316 /* A lazy DST would make this copy operation useless, since as
1317 soon as DST's contents were un-lazied (by a later value_contents
1318 call, say), the contents would be overwritten. A lazy SRC would
1319 mean we'd be copying garbage. */
1320 gdb_assert (!dst->lazy && !src->lazy);
1321
1322 /* The overwritten DST range gets unavailability ORed in, not
1323 replaced. Make sure to remember to implement replacing if it
1324 turns out actually necessary. */
1325 gdb_assert (value_bytes_available (dst, dst_offset, length));
1326 gdb_assert (!value_bits_any_optimized_out (dst,
1327 TARGET_CHAR_BIT * dst_offset,
1328 TARGET_CHAR_BIT * length));
1329
1330 /* Copy the data. */
1331 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1332 value_contents_all_raw (src) + src_offset * unit_size,
1333 length * unit_size);
1334
1335 /* Copy the meta-data, adjusted. */
1336 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1337 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1338 bit_length = length * unit_size * HOST_CHAR_BIT;
1339
1340 value_ranges_copy_adjusted (dst, dst_bit_offset,
1341 src, src_bit_offset,
1342 bit_length);
1343 }
1344
1345 /* Copy LENGTH bytes of SRC value's (all) contents
1346 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1347 (all) contents, starting at DST_OFFSET. If unavailable contents
1348 are being copied from SRC, the corresponding DST contents are
1349 marked unavailable accordingly. DST must not be lazy. If SRC is
1350 lazy, it will be fetched now.
1351
1352 It is assumed the contents of DST in the [DST_OFFSET,
1353 DST_OFFSET+LENGTH) range are wholly available. */
1354
1355 void
1356 value_contents_copy (struct value *dst, LONGEST dst_offset,
1357 struct value *src, LONGEST src_offset, LONGEST length)
1358 {
1359 if (src->lazy)
1360 value_fetch_lazy (src);
1361
1362 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1363 }
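
/* Illustrative sketch (editor's example, not part of the original
   sources): copying units [0, 4) of SRC into DST starting at unit 8
   copies the raw bytes and carries the unavailable / optimized-out
   metadata across, shifted to DST's offset:

     value_contents_copy (dst, 8, src, 0, 4);
     // If src bytes [1, 2) were unavailable, dst bytes [9, 10) now
     // are marked unavailable as well.  */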
1364
1365 int
1366 value_lazy (const struct value *value)
1367 {
1368 return value->lazy;
1369 }
1370
1371 void
1372 set_value_lazy (struct value *value, int val)
1373 {
1374 value->lazy = val;
1375 }
1376
1377 int
1378 value_stack (const struct value *value)
1379 {
1380 return value->stack;
1381 }
1382
1383 void
1384 set_value_stack (struct value *value, int val)
1385 {
1386 value->stack = val;
1387 }
1388
1389 const gdb_byte *
1390 value_contents (struct value *value)
1391 {
1392 const gdb_byte *result = value_contents_writeable (value);
1393 require_not_optimized_out (value);
1394 require_available (value);
1395 return result;
1396 }
1397
1398 gdb_byte *
1399 value_contents_writeable (struct value *value)
1400 {
1401 if (value->lazy)
1402 value_fetch_lazy (value);
1403 return value_contents_raw (value);
1404 }
1405
1406 int
1407 value_optimized_out (struct value *value)
1408 {
1409 /* We can only know if a value is optimized out once we have tried to
1410 fetch it. */
1411 if (value->optimized_out.empty () && value->lazy)
1412 {
1413 try
1414 {
1415 value_fetch_lazy (value);
1416 }
1417 catch (const gdb_exception_error &ex)
1418 {
1419 switch (ex.error)
1420 {
1421 case MEMORY_ERROR:
1422 case OPTIMIZED_OUT_ERROR:
1423 case NOT_AVAILABLE_ERROR:
1424 /* These can normally happen when we try to access an
1425 optimized out or unavailable register, either in a
1426 physical register or spilled to memory. */
1427 break;
1428 default:
1429 throw;
1430 }
1431 }
1432 }
1433
1434 return !value->optimized_out.empty ();
1435 }
1436
1437 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1438 the following LENGTH bytes. */
1439
1440 void
1441 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1442 {
1443 mark_value_bits_optimized_out (value,
1444 offset * TARGET_CHAR_BIT,
1445 length * TARGET_CHAR_BIT);
1446 }
1447
1448 /* See value.h. */
1449
1450 void
1451 mark_value_bits_optimized_out (struct value *value,
1452 LONGEST offset, LONGEST length)
1453 {
1454 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1455 }
1456
1457 int
1458 value_bits_synthetic_pointer (const struct value *value,
1459 LONGEST offset, LONGEST length)
1460 {
1461 if (value->lval != lval_computed
1462 || !value->location.computed.funcs->check_synthetic_pointer)
1463 return 0;
1464 return value->location.computed.funcs->check_synthetic_pointer (value,
1465 offset,
1466 length);
1467 }
1468
1469 LONGEST
1470 value_embedded_offset (const struct value *value)
1471 {
1472 return value->embedded_offset;
1473 }
1474
1475 void
1476 set_value_embedded_offset (struct value *value, LONGEST val)
1477 {
1478 value->embedded_offset = val;
1479 }
1480
1481 LONGEST
1482 value_pointed_to_offset (const struct value *value)
1483 {
1484 return value->pointed_to_offset;
1485 }
1486
1487 void
1488 set_value_pointed_to_offset (struct value *value, LONGEST val)
1489 {
1490 value->pointed_to_offset = val;
1491 }
1492
1493 const struct lval_funcs *
1494 value_computed_funcs (const struct value *v)
1495 {
1496 gdb_assert (value_lval_const (v) == lval_computed);
1497
1498 return v->location.computed.funcs;
1499 }
1500
1501 void *
1502 value_computed_closure (const struct value *v)
1503 {
1504 gdb_assert (v->lval == lval_computed);
1505
1506 return v->location.computed.closure;
1507 }
1508
1509 enum lval_type *
1510 deprecated_value_lval_hack (struct value *value)
1511 {
1512 return &value->lval;
1513 }
1514
1515 enum lval_type
1516 value_lval_const (const struct value *value)
1517 {
1518 return value->lval;
1519 }
1520
1521 CORE_ADDR
1522 value_address (const struct value *value)
1523 {
1524 if (value->lval != lval_memory)
1525 return 0;
1526 if (value->parent != NULL)
1527 return value_address (value->parent.get ()) + value->offset;
1528 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1529 {
1530 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1531 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1532 }
1533
1534 return value->location.address + value->offset;
1535 }
1536
1537 CORE_ADDR
1538 value_raw_address (const struct value *value)
1539 {
1540 if (value->lval != lval_memory)
1541 return 0;
1542 return value->location.address;
1543 }
1544
1545 void
1546 set_value_address (struct value *value, CORE_ADDR addr)
1547 {
1548 gdb_assert (value->lval == lval_memory);
1549 value->location.address = addr;
1550 }
1551
1552 struct internalvar **
1553 deprecated_value_internalvar_hack (struct value *value)
1554 {
1555 return &value->location.internalvar;
1556 }
1557
1558 struct frame_id *
1559 deprecated_value_next_frame_id_hack (struct value *value)
1560 {
1561 gdb_assert (value->lval == lval_register);
1562 return &value->location.reg.next_frame_id;
1563 }
1564
1565 int *
1566 deprecated_value_regnum_hack (struct value *value)
1567 {
1568 gdb_assert (value->lval == lval_register);
1569 return &value->location.reg.regnum;
1570 }
1571
1572 int
1573 deprecated_value_modifiable (const struct value *value)
1574 {
1575 return value->modifiable;
1576 }
1577 \f
1578 /* Return a mark in the value chain. All values allocated after the
1579 mark is obtained (except for those released) are subject to being freed
1580 if a subsequent value_free_to_mark is passed the mark. */
1581 struct value *
1582 value_mark (void)
1583 {
1584 if (all_values.empty ())
1585 return nullptr;
1586 return all_values.back ().get ();
1587 }
1588
1589 /* See value.h. */
1590
1591 void
1592 value_incref (struct value *val)
1593 {
1594 val->reference_count++;
1595 }
1596
1597 /* Release a reference to VAL, which was acquired with value_incref.
1598 This function is also called to deallocate values from the value
1599 chain. */
1600
1601 void
1602 value_decref (struct value *val)
1603 {
1604 if (val != nullptr)
1605 {
1606 gdb_assert (val->reference_count > 0);
1607 val->reference_count--;
1608 if (val->reference_count == 0)
1609 delete val;
1610 }
1611 }
1612
1613 /* Free all values allocated since MARK was obtained by value_mark
1614 (except for those released). */
1615 void
1616 value_free_to_mark (const struct value *mark)
1617 {
1618 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1619 if (iter == all_values.end ())
1620 all_values.clear ();
1621 else
1622 all_values.erase (iter + 1, all_values.end ());
1623 }
1624
1625 /* Remove VAL from the chain all_values
1626 so it will not be freed automatically. */
1627
1628 value_ref_ptr
1629 release_value (struct value *val)
1630 {
1631 if (val == nullptr)
1632 return value_ref_ptr ();
1633
1634 std::vector<value_ref_ptr>::reverse_iterator iter;
1635 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1636 {
1637 if (*iter == val)
1638 {
1639 value_ref_ptr result = *iter;
1640 all_values.erase (iter.base () - 1);
1641 return result;
1642 }
1643 }
1644
1645 /* We must always return an owned reference. Normally this happens
1646 because we transfer the reference from the value chain, but in
1647 this case the value was not on the chain. */
1648 return value_ref_ptr::new_reference (val);
1649 }
1650
1651 /* See value.h. */
1652
1653 std::vector<value_ref_ptr>
1654 value_release_to_mark (const struct value *mark)
1655 {
1656 std::vector<value_ref_ptr> result;
1657
1658 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1659 if (iter == all_values.end ())
1660 std::swap (result, all_values);
1661 else
1662 {
1663 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1664 all_values.erase (iter + 1, all_values.end ());
1665 }
1666 std::reverse (result.begin (), result.end ());
1667 return result;
1668 }
1669
1670 /* Return a copy of the value ARG.
1671 It contains the same contents, for the same memory address,
1672 but it's a different block of storage. */
1673
1674 struct value *
1675 value_copy (struct value *arg)
1676 {
1677 struct type *encl_type = value_enclosing_type (arg);
1678 struct value *val;
1679
1680 if (value_lazy (arg))
1681 val = allocate_value_lazy (encl_type);
1682 else
1683 val = allocate_value (encl_type);
1684 val->type = arg->type;
1685 VALUE_LVAL (val) = VALUE_LVAL (arg);
1686 val->location = arg->location;
1687 val->offset = arg->offset;
1688 val->bitpos = arg->bitpos;
1689 val->bitsize = arg->bitsize;
1690 val->lazy = arg->lazy;
1691 val->embedded_offset = value_embedded_offset (arg);
1692 val->pointed_to_offset = arg->pointed_to_offset;
1693 val->modifiable = arg->modifiable;
1694 if (!value_lazy (val))
1695 {
1696 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1697 TYPE_LENGTH (value_enclosing_type (arg)));
1698
1699 }
1700 val->unavailable = arg->unavailable;
1701 val->optimized_out = arg->optimized_out;
1702 val->parent = arg->parent;
1703 if (VALUE_LVAL (val) == lval_computed)
1704 {
1705 const struct lval_funcs *funcs = val->location.computed.funcs;
1706
1707 if (funcs->copy_closure)
1708 val->location.computed.closure = funcs->copy_closure (val);
1709 }
1710 return val;
1711 }
1712
1713 /* Return a "const" and/or "volatile" qualified version of the value V.
1714 If CNST is true, then the returned value will be qualified with
1715 "const".
1716 If VOLTL is true, then the returned value will be qualified with
1717 "volatile". */
1718
1719 struct value *
1720 make_cv_value (int cnst, int voltl, struct value *v)
1721 {
1722 struct type *val_type = value_type (v);
1723 struct type *enclosing_type = value_enclosing_type (v);
1724 struct value *cv_val = value_copy (v);
1725
1726 deprecated_set_value_type (cv_val,
1727 make_cv_type (cnst, voltl, val_type, NULL));
1728 set_value_enclosing_type (cv_val,
1729 make_cv_type (cnst, voltl, enclosing_type, NULL));
1730
1731 return cv_val;
1732 }
1733
1734 /* Return a version of ARG that is non-lvalue. */
1735
1736 struct value *
1737 value_non_lval (struct value *arg)
1738 {
1739 if (VALUE_LVAL (arg) != not_lval)
1740 {
1741 struct type *enc_type = value_enclosing_type (arg);
1742 struct value *val = allocate_value (enc_type);
1743
1744 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1745 TYPE_LENGTH (enc_type));
1746 val->type = arg->type;
1747 set_value_embedded_offset (val, value_embedded_offset (arg));
1748 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1749 return val;
1750 }
1751 return arg;
1752 }
1753
1754 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1755
1756 void
1757 value_force_lval (struct value *v, CORE_ADDR addr)
1758 {
1759 gdb_assert (VALUE_LVAL (v) == not_lval);
1760
1761 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1762 v->lval = lval_memory;
1763 v->location.address = addr;
1764 }
1765
1766 void
1767 set_value_component_location (struct value *component,
1768 const struct value *whole)
1769 {
1770 struct type *type;
1771
1772 gdb_assert (whole->lval != lval_xcallable);
1773
1774 if (whole->lval == lval_internalvar)
1775 VALUE_LVAL (component) = lval_internalvar_component;
1776 else
1777 VALUE_LVAL (component) = whole->lval;
1778
1779 component->location = whole->location;
1780 if (whole->lval == lval_computed)
1781 {
1782 const struct lval_funcs *funcs = whole->location.computed.funcs;
1783
1784 if (funcs->copy_closure)
1785 component->location.computed.closure = funcs->copy_closure (whole);
1786 }
1787
1788 /* If the WHOLE value has a dynamically resolved location property then
1789 update the address of the COMPONENT. */
1790 type = value_type (whole);
1791 if (NULL != TYPE_DATA_LOCATION (type)
1792 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1793 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1794
1795 /* Similarly, if the COMPONENT value has a dynamically resolved location
1796 property then update its address. */
1797 type = value_type (component);
1798 if (NULL != TYPE_DATA_LOCATION (type)
1799 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1800 {
1801 /* If the COMPONENT has a dynamic location, and is an
1802 lval_internalvar_component, then we change it to a lval_memory.
1803
1804 Usually a component of an internalvar is created non-lazy, and has
1805 its content immediately copied from the parent internalvar.
1806 However, for components with a dynamic location, the content of
1807 the component is not contained within the parent, but is instead
1808 accessed indirectly. Further, the component will be created as a
1809 lazy value.
1810
1811 By changing the type of the component to lval_memory we ensure
1812 that value_fetch_lazy can successfully load the component.
1813
1814 This solution isn't ideal, but a real fix would require values to
1815 carry around both the parent value contents, and the contents of
1816 any dynamic fields within the parent. This is a substantial
1817 change to how values work in GDB. */
1818 if (VALUE_LVAL (component) == lval_internalvar_component)
1819 {
1820 gdb_assert (value_lazy (component));
1821 VALUE_LVAL (component) = lval_memory;
1822 }
1823 else
1824 gdb_assert (VALUE_LVAL (component) == lval_memory);
1825 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1826 }
1827 }
1828
1829 /* Access to the value history. */
1830
1831 /* Record a new value in the value history.
1832 Returns the absolute history index of the entry. */
1833
1834 int
1835 record_latest_value (struct value *val)
1836 {
1837 /* We don't want this value to have anything to do with the inferior anymore.
1838 In particular, "set $1 = 50" should not affect the variable from which
1839 the value was taken, and fast watchpoints should be able to assume that
1840 a value on the value history never changes. */
1841 if (value_lazy (val))
1842 value_fetch_lazy (val);
1843 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1844 from. This is a bit dubious, because then *&$1 does not just return $1
1845 but the current contents of that location. c'est la vie... */
1846 val->modifiable = 0;
1847
1848 value_history.push_back (release_value (val));
1849
1850 return value_history.size ();
1851 }
1852
1853 /* Return a copy of the value in the history with sequence number NUM. */
1854
1855 struct value *
1856 access_value_history (int num)
1857 {
1858 int absnum = num;
1859
1860 if (absnum <= 0)
1861 absnum += value_history.size ();
1862
1863 if (absnum <= 0)
1864 {
1865 if (num == 0)
1866 error (_("The history is empty."));
1867 else if (num == 1)
1868 error (_("There is only one value in the history."));
1869 else
1870 error (_("History does not go back to $$%d."), -num);
1871 }
1872 if (absnum > value_history.size ())
1873 error (_("History has not yet reached $%d."), absnum);
1874
1875 absnum--;
1876
1877 return value_copy (value_history[absnum].get ());
1878 }
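
/* Illustrative sketch (editor's example, not part of the original
   sources): history numbers are 1-based, and non-positive NUMs count
   back from the end, matching the $N / $$N syntax:

     access_value_history (1);    // value of $1
     access_value_history (0);    // last value, i.e. $ (or $$0)
     access_value_history (-1);   // next-to-last value, i.e. $$1  */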
1879
1880 static void
1881 show_values (const char *num_exp, int from_tty)
1882 {
1883 int i;
1884 struct value *val;
1885 static int num = 1;
1886
1887 if (num_exp)
1888 {
1889 /* "show values +" should print from the stored position.
1890 "show values <exp>" should print around value number <exp>. */
1891 if (num_exp[0] != '+' || num_exp[1] != '\0')
1892 num = parse_and_eval_long (num_exp) - 5;
1893 }
1894 else
1895 {
1896 /* "show values" means print the last 10 values. */
1897 num = value_history.size () - 9;
1898 }
1899
1900 if (num <= 0)
1901 num = 1;
1902
1903 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1904 {
1905 struct value_print_options opts;
1906
1907 val = access_value_history (i);
1908 printf_filtered (("$%d = "), i);
1909 get_user_print_options (&opts);
1910 value_print (val, gdb_stdout, &opts);
1911 printf_filtered (("\n"));
1912 }
1913
1914 /* The next "show values +" should start after what we just printed. */
1915 num += 10;
1916
1917 /* Hitting just return after this command should do the same thing as
1918 "show values +". If num_exp is null, this is unnecessary, since
1919 "show values +" is not useful after "show values". */
1920 if (from_tty && num_exp)
1921 set_repeat_arguments ("+");
1922 }
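
/* Illustrative sketch (editor's example, not part of the original
   sources):

     (gdb) show values        // the last ten values, $N-9 ... $N
     (gdb) show values 42     // ten values around $42: $37 ... $46
     (gdb) show values +      // the following ten values  */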
1923 \f
1924 enum internalvar_kind
1925 {
1926 /* The internal variable is empty. */
1927 INTERNALVAR_VOID,
1928
1929 /* The value of the internal variable is provided directly as
1930 a GDB value object. */
1931 INTERNALVAR_VALUE,
1932
1933 /* A fresh value is computed via a call-back routine on every
1934 access to the internal variable. */
1935 INTERNALVAR_MAKE_VALUE,
1936
1937 /* The internal variable holds a GDB internal convenience function. */
1938 INTERNALVAR_FUNCTION,
1939
1940 /* The variable holds an integer value. */
1941 INTERNALVAR_INTEGER,
1942
1943 /* The variable holds a GDB-provided string. */
1944 INTERNALVAR_STRING,
1945 };
1946
1947 union internalvar_data
1948 {
1949 /* A value object used with INTERNALVAR_VALUE. */
1950 struct value *value;
1951
1952 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1953 struct
1954 {
1955 /* The functions to call. */
1956 const struct internalvar_funcs *functions;
1957
1958 /* The function's user-data. */
1959 void *data;
1960 } make_value;
1961
1962 /* The internal function used with INTERNALVAR_FUNCTION. */
1963 struct
1964 {
1965 struct internal_function *function;
1966 /* True if this is the canonical name for the function. */
1967 int canonical;
1968 } fn;
1969
1970 /* An integer value used with INTERNALVAR_INTEGER. */
1971 struct
1972 {
1973 /* If type is non-NULL, it will be used as the type to generate
1974 a value for this internal variable. If type is NULL, a default
1975 integer type for the architecture is used. */
1976 struct type *type;
1977 LONGEST val;
1978 } integer;
1979
1980 /* A string value used with INTERNALVAR_STRING. */
1981 char *string;
1982 };
1983
1984 /* Internal variables. These are variables within the debugger
1985 that hold values assigned by debugger commands.
1986 The user refers to them with a '$' prefix
1987 that does not appear in the variable names stored internally. */
1988
1989 struct internalvar
1990 {
1991 struct internalvar *next;
1992 char *name;
1993
1994 /* We support various different kinds of content of an internal variable.
1995 enum internalvar_kind specifies the kind, and union internalvar_data
1996 provides the data associated with this particular kind. */
1997
1998 enum internalvar_kind kind;
1999
2000 union internalvar_data u;
2001 };
2002
2003 static struct internalvar *internalvars;
2004
2005 /* If the variable does not already exist, create it and give it the
2006 value given. If no value is given then the default is zero. */
2007 static void
2008 init_if_undefined_command (const char* args, int from_tty)
2009 {
2010 struct internalvar *intvar = nullptr;
2011
2012 /* Parse the expression - this is taken from set_command(). */
2013 expression_up expr = parse_expression (args);
2014
2015 /* Validate the expression.
2016 Was the expression an assignment?
2017 Or even an expression at all? */
2018 if (expr->first_opcode () != BINOP_ASSIGN)
2019 error (_("Init-if-undefined requires an assignment expression."));
2020
2021 /* Extract the variable from the parsed expression. */
2022 expr::assign_operation *assign
2023 = dynamic_cast<expr::assign_operation *> (expr->op.get ());
2024 if (assign != nullptr)
2025 {
2026 expr::operation *lhs = assign->get_lhs ();
2027 expr::internalvar_operation *ivarop
2028 = dynamic_cast<expr::internalvar_operation *> (lhs);
2029 if (ivarop != nullptr)
2030 intvar = ivarop->get_internalvar ();
2031 }
2032
2033 if (intvar == nullptr)
2034 error (_("The first parameter to init-if-undefined "
2035 "should be a GDB variable."));
2036
2037 /* Only evaluate the expression if the lvalue is void.
2038 This may still fail if the expression is invalid. */
2039 if (intvar->kind == INTERNALVAR_VOID)
2040 evaluate_expression (expr.get ());
2041 }
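/* Example use of the command implemented above (illustrative only):

     (gdb) init-if-undefined $breakcount = 0
     (gdb) init-if-undefined $breakcount = 99

   The second command is a no-op because $breakcount is no longer void,
   so scripts can provide defaults without clobbering user settings.  */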
2042
2043
2044 /* Look up an internal variable with name NAME. NAME should not
2045 normally include a dollar sign.
2046
2047 If the specified internal variable does not exist,
2048 the return value is NULL. */
2049
2050 struct internalvar *
2051 lookup_only_internalvar (const char *name)
2052 {
2053 struct internalvar *var;
2054
2055 for (var = internalvars; var; var = var->next)
2056 if (strcmp (var->name, name) == 0)
2057 return var;
2058
2059 return NULL;
2060 }
2061
2062 /* Complete NAME by comparing it to the names of internal
2063 variables. */
2064
2065 void
2066 complete_internalvar (completion_tracker &tracker, const char *name)
2067 {
2068 struct internalvar *var;
2069 int len;
2070
2071 len = strlen (name);
2072
2073 for (var = internalvars; var; var = var->next)
2074 if (strncmp (var->name, name, len) == 0)
2075 tracker.add_completion (make_unique_xstrdup (var->name));
2076 }
2077
2078 /* Create an internal variable with name NAME and with a void value.
2079 NAME should not normally include a dollar sign. */
2080
2081 struct internalvar *
2082 create_internalvar (const char *name)
2083 {
2084 struct internalvar *var = XNEW (struct internalvar);
2085
2086 var->name = xstrdup (name);
2087 var->kind = INTERNALVAR_VOID;
2088 var->next = internalvars;
2089 internalvars = var;
2090 return var;
2091 }
2092
2093 /* Create an internal variable with name NAME and register FUNCS as
2094 the callbacks that value_of_internalvar uses to create a value
2095 whenever this variable is referenced. NAME should not normally
2096 include a dollar sign. DATA is passed uninterpreted to those
2097 callbacks. FUNCS->destroy, if not NULL, is called when the internal
2098 variable is destroyed; it is passed DATA as its only argument. */
2099
2100 struct internalvar *
2101 create_internalvar_type_lazy (const char *name,
2102 const struct internalvar_funcs *funcs,
2103 void *data)
2104 {
2105 struct internalvar *var = create_internalvar (name);
2106
2107 var->kind = INTERNALVAR_MAKE_VALUE;
2108 var->u.make_value.functions = funcs;
2109 var->u.make_value.data = data;
2110 return var;
2111 }
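/* A minimal sketch of registering a lazily-computed convenience
   variable with create_internalvar_type_lazy above.  The "_the_answer"
   name and the make_the_answer callback are hypothetical; the callback
   slots shown (make_value, compile_to_ax, destroy) are the ones this
   file invokes through internalvar_funcs:

     static struct value *
     make_the_answer (struct gdbarch *gdbarch, struct internalvar *var,
                      void *data)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     static const struct internalvar_funcs the_answer_funcs =
     {
       make_the_answer,
       NULL,    // compile_to_ax
       NULL,    // destroy
     };

     create_internalvar_type_lazy ("_the_answer", &the_answer_funcs, NULL);

   Each evaluation of "$_the_answer" then calls make_the_answer.  */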
2112
2113 /* See documentation in value.h. */
2114
2115 int
2116 compile_internalvar_to_ax (struct internalvar *var,
2117 struct agent_expr *expr,
2118 struct axs_value *value)
2119 {
2120 if (var->kind != INTERNALVAR_MAKE_VALUE
2121 || var->u.make_value.functions->compile_to_ax == NULL)
2122 return 0;
2123
2124 var->u.make_value.functions->compile_to_ax (var, expr, value,
2125 var->u.make_value.data);
2126 return 1;
2127 }
2128
2129 /* Look up an internal variable with name NAME. NAME should not
2130 normally include a dollar sign.
2131
2132 If the specified internal variable does not exist,
2133 one is created, with a void value. */
2134
2135 struct internalvar *
2136 lookup_internalvar (const char *name)
2137 {
2138 struct internalvar *var;
2139
2140 var = lookup_only_internalvar (name);
2141 if (var)
2142 return var;
2143
2144 return create_internalvar (name);
2145 }
2146
2147 /* Return current value of internal variable VAR. For variables that
2148 are not inherently typed, use a value type appropriate for GDBARCH. */
2149
2150 struct value *
2151 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2152 {
2153 struct value *val;
2154 struct trace_state_variable *tsv;
2155
2156 /* If there is a trace state variable of the same name, assume that
2157 is what we really want to see. */
2158 tsv = find_trace_state_variable (var->name);
2159 if (tsv)
2160 {
2161 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2162 &(tsv->value));
2163 if (tsv->value_known)
2164 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2165 tsv->value);
2166 else
2167 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2168 return val;
2169 }
2170
2171 switch (var->kind)
2172 {
2173 case INTERNALVAR_VOID:
2174 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2175 break;
2176
2177 case INTERNALVAR_FUNCTION:
2178 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2179 break;
2180
2181 case INTERNALVAR_INTEGER:
2182 if (!var->u.integer.type)
2183 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2184 var->u.integer.val);
2185 else
2186 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2187 break;
2188
2189 case INTERNALVAR_STRING:
2190 val = value_cstring (var->u.string, strlen (var->u.string),
2191 builtin_type (gdbarch)->builtin_char);
2192 break;
2193
2194 case INTERNALVAR_VALUE:
2195 val = value_copy (var->u.value);
2196 if (value_lazy (val))
2197 value_fetch_lazy (val);
2198 break;
2199
2200 case INTERNALVAR_MAKE_VALUE:
2201 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2202 var->u.make_value.data);
2203 break;
2204
2205 default:
2206 internal_error (__FILE__, __LINE__, _("bad kind"));
2207 }
2208
2209 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2210 on this value go back to affect the original internal variable.
2211
2212 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2213 no underlying modifiable state in the internal variable.
2214
2215 Likewise, if the variable's value is a computed lvalue, we want
2216 references to it to produce another computed lvalue, where
2217 references and assignments actually operate through the
2218 computed value's functions.
2219
2220 This means that internal variables with computed values
2221 behave a little differently from other internal variables:
2222 assignments to them don't just replace the previous value
2223 altogether. At the moment, this seems like the behavior we
2224 want. */
2225
2226 if (var->kind != INTERNALVAR_MAKE_VALUE
2227 && val->lval != lval_computed)
2228 {
2229 VALUE_LVAL (val) = lval_internalvar;
2230 VALUE_INTERNALVAR (val) = var;
2231 }
2232
2233 return val;
2234 }
2235
2236 int
2237 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2238 {
2239 if (var->kind == INTERNALVAR_INTEGER)
2240 {
2241 *result = var->u.integer.val;
2242 return 1;
2243 }
2244
2245 if (var->kind == INTERNALVAR_VALUE)
2246 {
2247 struct type *type = check_typedef (value_type (var->u.value));
2248
2249 if (type->code () == TYPE_CODE_INT)
2250 {
2251 *result = value_as_long (var->u.value);
2252 return 1;
2253 }
2254 }
2255
2256 return 0;
2257 }
2258
2259 static int
2260 get_internalvar_function (struct internalvar *var,
2261 struct internal_function **result)
2262 {
2263 switch (var->kind)
2264 {
2265 case INTERNALVAR_FUNCTION:
2266 *result = var->u.fn.function;
2267 return 1;
2268
2269 default:
2270 return 0;
2271 }
2272 }
2273
2274 void
2275 set_internalvar_component (struct internalvar *var,
2276 LONGEST offset, LONGEST bitpos,
2277 LONGEST bitsize, struct value *newval)
2278 {
2279 gdb_byte *addr;
2280 struct gdbarch *arch;
2281 int unit_size;
2282
2283 switch (var->kind)
2284 {
2285 case INTERNALVAR_VALUE:
2286 addr = value_contents_writeable (var->u.value);
2287 arch = get_value_arch (var->u.value);
2288 unit_size = gdbarch_addressable_memory_unit_size (arch);
2289
2290 if (bitsize)
2291 modify_field (value_type (var->u.value), addr + offset,
2292 value_as_long (newval), bitpos, bitsize);
2293 else
2294 memcpy (addr + offset * unit_size, value_contents (newval),
2295 TYPE_LENGTH (value_type (newval)));
2296 break;
2297
2298 default:
2299 /* We can never get a component of any other kind. */
2300 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2301 }
2302 }
2303
2304 void
2305 set_internalvar (struct internalvar *var, struct value *val)
2306 {
2307 enum internalvar_kind new_kind;
2308 union internalvar_data new_data = { 0 };
2309
2310 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2311 error (_("Cannot overwrite convenience function %s"), var->name);
2312
2313 /* Prepare new contents. */
2314 switch (check_typedef (value_type (val))->code ())
2315 {
2316 case TYPE_CODE_VOID:
2317 new_kind = INTERNALVAR_VOID;
2318 break;
2319
2320 case TYPE_CODE_INTERNAL_FUNCTION:
2321 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2322 new_kind = INTERNALVAR_FUNCTION;
2323 get_internalvar_function (VALUE_INTERNALVAR (val),
2324 &new_data.fn.function);
2325 /* Copies created here are never canonical. */
2326 break;
2327
2328 default:
2329 new_kind = INTERNALVAR_VALUE;
2330 struct value *copy = value_copy (val);
2331 copy->modifiable = 1;
2332
2333 /* Force the value to be fetched from the target now, to avoid problems
2334 later when this internalvar is referenced and the target is gone or
2335 has changed. */
2336 if (value_lazy (copy))
2337 value_fetch_lazy (copy);
2338
2339 /* Release the value from the value chain to prevent it from being
2340 deleted by free_all_values. From here on this function should not
2341 call error () until new_data is installed into the var->u to avoid
2342 leaking memory. */
2343 new_data.value = release_value (copy).release ();
2344
2345 /* Internal variables which are created from values with a dynamic
2346 location don't need the location property of the origin anymore.
2347 The resolved dynamic location is used in preference to any other
2348 address when accessing the value.
2349 If we kept it, we would still refer to the origin value.
2350 Remove the location property in case it exists. */
2351 value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2352
2353 break;
2354 }
2355
2356 /* Clean up old contents. */
2357 clear_internalvar (var);
2358
2359 /* Switch over. */
2360 var->kind = new_kind;
2361 var->u = new_data;
2362 /* End code which must not call error(). */
2363 }
2364
2365 void
2366 set_internalvar_integer (struct internalvar *var, LONGEST l)
2367 {
2368 /* Clean up old contents. */
2369 clear_internalvar (var);
2370
2371 var->kind = INTERNALVAR_INTEGER;
2372 var->u.integer.type = NULL;
2373 var->u.integer.val = l;
2374 }
2375
2376 void
2377 set_internalvar_string (struct internalvar *var, const char *string)
2378 {
2379 /* Clean up old contents. */
2380 clear_internalvar (var);
2381
2382 var->kind = INTERNALVAR_STRING;
2383 var->u.string = xstrdup (string);
2384 }
2385
2386 static void
2387 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2388 {
2389 /* Clean up old contents. */
2390 clear_internalvar (var);
2391
2392 var->kind = INTERNALVAR_FUNCTION;
2393 var->u.fn.function = f;
2394 var->u.fn.canonical = 1;
2395 /* Variables installed here are always the canonical version. */
2396 }
2397
2398 void
2399 clear_internalvar (struct internalvar *var)
2400 {
2401 /* Clean up old contents. */
2402 switch (var->kind)
2403 {
2404 case INTERNALVAR_VALUE:
2405 value_decref (var->u.value);
2406 break;
2407
2408 case INTERNALVAR_STRING:
2409 xfree (var->u.string);
2410 break;
2411
2412 case INTERNALVAR_MAKE_VALUE:
2413 if (var->u.make_value.functions->destroy != NULL)
2414 var->u.make_value.functions->destroy (var->u.make_value.data);
2415 break;
2416
2417 default:
2418 break;
2419 }
2420
2421 /* Reset to void kind. */
2422 var->kind = INTERNALVAR_VOID;
2423 }
2424
2425 const char *
2426 internalvar_name (const struct internalvar *var)
2427 {
2428 return var->name;
2429 }
2430
2431 static struct internal_function *
2432 create_internal_function (const char *name,
2433 internal_function_fn handler, void *cookie)
2434 {
2435 struct internal_function *ifn = XNEW (struct internal_function);
2436
2437 ifn->name = xstrdup (name);
2438 ifn->handler = handler;
2439 ifn->cookie = cookie;
2440 return ifn;
2441 }
2442
2443 const char *
2444 value_internal_function_name (struct value *val)
2445 {
2446 struct internal_function *ifn;
2447 int result;
2448
2449 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2450 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2451 gdb_assert (result);
2452
2453 return ifn->name;
2454 }
2455
2456 struct value *
2457 call_internal_function (struct gdbarch *gdbarch,
2458 const struct language_defn *language,
2459 struct value *func, int argc, struct value **argv)
2460 {
2461 struct internal_function *ifn;
2462 int result;
2463
2464 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2465 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2466 gdb_assert (result);
2467
2468 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2469 }
2470
2471 /* The 'function' command. This does nothing -- it is just a
2472 placeholder to let "help function NAME" work. This is also used as
2473 the implementation of the sub-command that is created when
2474 registering an internal function. */
2475 static void
2476 function_command (const char *command, int from_tty)
2477 {
2478 /* Do nothing. */
2479 }
2480
2481 /* Helper function that does the work for add_internal_function. */
2482
2483 static struct cmd_list_element *
2484 do_add_internal_function (const char *name, const char *doc,
2485 internal_function_fn handler, void *cookie)
2486 {
2487 struct internal_function *ifn;
2488 struct internalvar *var = lookup_internalvar (name);
2489
2490 ifn = create_internal_function (name, handler, cookie);
2491 set_internalvar_function (var, ifn);
2492
2493 return add_cmd (name, no_class, function_command, doc, &functionlist);
2494 }
2495
2496 /* See value.h. */
2497
2498 void
2499 add_internal_function (const char *name, const char *doc,
2500 internal_function_fn handler, void *cookie)
2501 {
2502 do_add_internal_function (name, doc, handler, cookie);
2503 }
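/* A minimal sketch of registering a convenience function with the
   overload above.  The "$_always_one" function and its handler are
   hypothetical; the handler signature is the internal_function_fn
   type used by call_internal_function above:

     static struct value *
     always_one_handler (struct gdbarch *gdbarch,
                         const struct language_defn *language,
                         void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 1);
     }

     add_internal_function ("_always_one", _("Always evaluates to 1."),
                            always_one_handler, NULL);

   The user can then write "print $_always_one ()" and also see the
   documentation string via "help function _always_one".  */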
2504
2505 /* See value.h. */
2506
2507 void
2508 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2509 gdb::unique_xmalloc_ptr<char> &&doc,
2510 internal_function_fn handler, void *cookie)
2511 {
2512 struct cmd_list_element *cmd
2513 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2514 doc.release ();
2515 cmd->doc_allocated = 1;
2516 name.release ();
2517 cmd->name_allocated = 1;
2518 }
2519
2520 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2521 prevent cycles / duplicates. */
2522
2523 void
2524 preserve_one_value (struct value *value, struct objfile *objfile,
2525 htab_t copied_types)
2526 {
2527 if (value->type->objfile_owner () == objfile)
2528 value->type = copy_type_recursive (objfile, value->type, copied_types);
2529
2530 if (value->enclosing_type->objfile_owner () == objfile)
2531 value->enclosing_type = copy_type_recursive (objfile,
2532 value->enclosing_type,
2533 copied_types);
2534 }
2535
2536 /* Likewise for internal variable VAR. */
2537
2538 static void
2539 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2540 htab_t copied_types)
2541 {
2542 switch (var->kind)
2543 {
2544 case INTERNALVAR_INTEGER:
2545 if (var->u.integer.type
2546 && var->u.integer.type->objfile_owner () == objfile)
2547 var->u.integer.type
2548 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2549 break;
2550
2551 case INTERNALVAR_VALUE:
2552 preserve_one_value (var->u.value, objfile, copied_types);
2553 break;
2554 }
2555 }
2556
2557 /* Update the internal variables and value history when OBJFILE is
2558 discarded; we must copy the types out of the objfile. New global types
2559 will be created for every convenience variable which currently points to
2560 this objfile's types, and the convenience variables will be adjusted to
2561 use the new global types. */
2562
2563 void
2564 preserve_values (struct objfile *objfile)
2565 {
2566 struct internalvar *var;
2567
2568 /* Create the hash table. We allocate on the objfile's obstack, since
2569 it is soon to be deleted. */
2570 htab_up copied_types = create_copied_types_hash (objfile);
2571
2572 for (const value_ref_ptr &item : value_history)
2573 preserve_one_value (item.get (), objfile, copied_types.get ());
2574
2575 for (var = internalvars; var; var = var->next)
2576 preserve_one_internalvar (var, objfile, copied_types.get ());
2577
2578 preserve_ext_lang_values (objfile, copied_types.get ());
2579 }
2580
2581 static void
2582 show_convenience (const char *ignore, int from_tty)
2583 {
2584 struct gdbarch *gdbarch = get_current_arch ();
2585 struct internalvar *var;
2586 int varseen = 0;
2587 struct value_print_options opts;
2588
2589 get_user_print_options (&opts);
2590 for (var = internalvars; var; var = var->next)
2591 {
2592
2593 if (!varseen)
2594 {
2595 varseen = 1;
2596 }
2597 printf_filtered (("$%s = "), var->name);
2598
2599 try
2600 {
2601 struct value *val;
2602
2603 val = value_of_internalvar (gdbarch, var);
2604 value_print (val, gdb_stdout, &opts);
2605 }
2606 catch (const gdb_exception_error &ex)
2607 {
2608 fprintf_styled (gdb_stdout, metadata_style.style (),
2609 _("<error: %s>"), ex.what ());
2610 }
2611
2612 printf_filtered (("\n"));
2613 }
2614 if (!varseen)
2615 {
2616 /* This text does not mention convenience functions on purpose.
2617 The user can't create them except via Python, and if Python support
2618 is installed this message will never be printed ($_streq will
2619 exist). */
2620 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2621 "Convenience variables have "
2622 "names starting with \"$\";\n"
2623 "use \"set\" as in \"set "
2624 "$foo = 5\" to define them.\n"));
2625 }
2626 }
2627 \f
2628
2629 /* See value.h. */
2630
2631 struct value *
2632 value_from_xmethod (xmethod_worker_up &&worker)
2633 {
2634 struct value *v;
2635
2636 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2637 v->lval = lval_xcallable;
2638 v->location.xm_worker = worker.release ();
2639 v->modifiable = 0;
2640
2641 return v;
2642 }
2643
2644 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2645
2646 struct type *
2647 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2648 {
2649 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2650 && method->lval == lval_xcallable && !argv.empty ());
2651
2652 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2653 }
2654
2655 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2656
2657 struct value *
2658 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2659 {
2660 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2661 && method->lval == lval_xcallable && !argv.empty ());
2662
2663 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2664 }
2665 \f
2666 /* Extract a value as a C number (either long or double).
2667 Knows how to convert fixed values to double, or
2668 floating values to long.
2669 Does not deallocate the value. */
2670
2671 LONGEST
2672 value_as_long (struct value *val)
2673 {
2674 /* This coerces arrays and functions, which is necessary (e.g.
2675 in disassemble_command). It also dereferences references, which
2676 I suspect is the most logical thing to do. */
2677 val = coerce_array (val);
2678 return unpack_long (value_type (val), value_contents (val));
2679 }
2680
2681 /* Extract a value as a C pointer. Does not deallocate the value.
2682 Note that val's type may not actually be a pointer; value_as_long
2683 handles all the cases. */
2684 CORE_ADDR
2685 value_as_address (struct value *val)
2686 {
2687 struct gdbarch *gdbarch = value_type (val)->arch ();
2688
2689 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2690 whether we want this to be true eventually. */
2691 #if 0
2692 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2693 non-address (e.g. argument to "signal", "info break", etc.), or
2694 for pointers to char, in which the low bits *are* significant. */
2695 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2696 #else
2697
2698 /* There are several targets (IA-64, PowerPC, and others) which
2699 don't represent pointers to functions as simply the address of
2700 the function's entry point. For example, on the IA-64, a
2701 function pointer points to a two-word descriptor, generated by
2702 the linker, which contains the function's entry point, and the
2703 value the IA-64 "global pointer" register should have --- to
2704 support position-independent code. The linker generates
2705 descriptors only for those functions whose addresses are taken.
2706
2707 On such targets, it's difficult for GDB to convert an arbitrary
2708 function address into a function pointer; it has to either find
2709 an existing descriptor for that function, or call malloc and
2710 build its own. On some targets, it is impossible for GDB to
2711 build a descriptor at all: the descriptor must contain a jump
2712 instruction; data memory cannot be executed; and code memory
2713 cannot be modified.
2714
2715 Upon entry to this function, if VAL is a value of type `function'
2716 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2717 value_address (val) is the address of the function. This is what
2718 you'll get if you evaluate an expression like `main'. The call
2719 to COERCE_ARRAY below actually does all the usual unary
2720 conversions, which includes converting values of type `function'
2721 to `pointer to function'. This is the challenging conversion
2722 discussed above. Then, `unpack_long' will convert that pointer
2723 back into an address.
2724
2725 So, suppose the user types `disassemble foo' on an architecture
2726 with a strange function pointer representation, on which GDB
2727 cannot build its own descriptors, and suppose further that `foo'
2728 has no linker-built descriptor. The address->pointer conversion
2729 will signal an error and prevent the command from running, even
2730 though the next step would have been to convert the pointer
2731 directly back into the same address.
2732
2733 The following shortcut avoids this whole mess. If VAL is a
2734 function, just return its address directly. */
2735 if (value_type (val)->code () == TYPE_CODE_FUNC
2736 || value_type (val)->code () == TYPE_CODE_METHOD)
2737 return value_address (val);
2738
2739 val = coerce_array (val);
2740
2741 /* Some architectures (e.g. Harvard) map instruction and data
2742 addresses onto a single large unified address space. For
2743 instance: an architecture may consider a large integer in the
2744 range 0x10000000 .. 0x1000ffff to already represent a data
2745 address (hence not need a pointer to address conversion) while
2746 a small integer would still need the full integer to pointer
2747 to address conversion. Just assume such architectures handle
2748 all integer conversions in a single function. */
2749
2750 /* JimB writes:
2751
2752 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2753 must admonish GDB hackers to make sure its behavior matches the
2754 compiler's, whenever possible.
2755
2756 In general, I think GDB should evaluate expressions the same way
2757 the compiler does. When the user copies an expression out of
2758 their source code and hands it to a `print' command, they should
2759 get the same value the compiler would have computed. Any
2760 deviation from this rule can cause major confusion and annoyance,
2761 and needs to be justified carefully. In other words, GDB doesn't
2762 really have the freedom to do these conversions in clever and
2763 useful ways.
2764
2765 AndrewC pointed out that users aren't complaining about how GDB
2766 casts integers to pointers; they are complaining that they can't
2767 take an address from a disassembly listing and give it to `x/i'.
2768 This is certainly important.
2769
2770 Adding an architecture method like integer_to_address() certainly
2771 makes it possible for GDB to "get it right" in all circumstances
2772 --- the target has complete control over how things get done, so
2773 people can Do The Right Thing for their target without breaking
2774 anyone else. The standard doesn't specify how integers get
2775 converted to pointers; usually, the ABI doesn't either, but
2776 ABI-specific code is a more reasonable place to handle it. */
2777
2778 if (value_type (val)->code () != TYPE_CODE_PTR
2779 && !TYPE_IS_REFERENCE (value_type (val))
2780 && gdbarch_integer_to_address_p (gdbarch))
2781 return gdbarch_integer_to_address (gdbarch, value_type (val),
2782 value_contents (val));
2783
2784 return unpack_long (value_type (val), value_contents (val));
2785 #endif
2786 }
2787 \f
2788 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2789 as a long, or as a double, assuming the raw data is described
2790 by type TYPE. Knows how to convert different sizes of values
2791 and can convert between fixed and floating point. We don't assume
2792 any alignment for the raw data. Return value is in host byte order.
2793
2794 If you want functions and arrays to be coerced to pointers, and
2795 references to be dereferenced, call value_as_long() instead.
2796
2797 C++: It is assumed that the front-end has taken care of
2798 all matters concerning pointers to members. A pointer
2799 to member which reaches here is considered to be equivalent
2800 to an INT (or some size). After all, it is only an offset. */
2801
2802 LONGEST
2803 unpack_long (struct type *type, const gdb_byte *valaddr)
2804 {
2805 if (is_fixed_point_type (type))
2806 type = type->fixed_point_type_base_type ();
2807
2808 enum bfd_endian byte_order = type_byte_order (type);
2809 enum type_code code = type->code ();
2810 int len = TYPE_LENGTH (type);
2811 int nosign = type->is_unsigned ();
2812
2813 switch (code)
2814 {
2815 case TYPE_CODE_TYPEDEF:
2816 return unpack_long (check_typedef (type), valaddr);
2817 case TYPE_CODE_ENUM:
2818 case TYPE_CODE_FLAGS:
2819 case TYPE_CODE_BOOL:
2820 case TYPE_CODE_INT:
2821 case TYPE_CODE_CHAR:
2822 case TYPE_CODE_RANGE:
2823 case TYPE_CODE_MEMBERPTR:
2824 {
2825 LONGEST result;
2826
2827 if (type->bit_size_differs_p ())
2828 {
2829 unsigned bit_off = type->bit_offset ();
2830 unsigned bit_size = type->bit_size ();
2831 if (bit_size == 0)
2832 {
2833 /* unpack_bits_as_long doesn't handle this case the
2834 way we'd like, so handle it here. */
2835 result = 0;
2836 }
2837 else
2838 result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2839 }
2840 else
2841 {
2842 if (nosign)
2843 result = extract_unsigned_integer (valaddr, len, byte_order);
2844 else
2845 result = extract_signed_integer (valaddr, len, byte_order);
2846 }
2847 if (code == TYPE_CODE_RANGE)
2848 result += type->bounds ()->bias;
2849 return result;
2850 }
2851
2852 case TYPE_CODE_FLT:
2853 case TYPE_CODE_DECFLOAT:
2854 return target_float_to_longest (valaddr, type);
2855
2856 case TYPE_CODE_FIXED_POINT:
2857 {
2858 gdb_mpq vq;
2859 vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2860 byte_order, nosign,
2861 type->fixed_point_scaling_factor ());
2862
2863 gdb_mpz vz;
2864 mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
2865 return vz.as_integer<LONGEST> ();
2866 }
2867
2868 case TYPE_CODE_PTR:
2869 case TYPE_CODE_REF:
2870 case TYPE_CODE_RVALUE_REF:
2871 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2872 whether we want this to be true eventually. */
2873 return extract_typed_address (valaddr, type);
2874
2875 default:
2876 error (_("Value can't be converted to integer."));
2877 }
2878 }
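/* A worked example of unpack_long above (a sketch, assuming TYPE is a
   2-byte unsigned integer type on a little-endian target):

     const gdb_byte buf[] = { 0x34, 0x12 };
     unpack_long (type, buf);    // yields 0x1234

   A signed TYPE of the same size would instead sign-extend the result
   into the returned LONGEST.  */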
2879
2880 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2881 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2882 We don't assume any alignment for the raw data. Return value is in
2883 host byte order.
2884
2885 If you want functions and arrays to be coerced to pointers, and
2886 references to be dereferenced, call value_as_address() instead.
2887
2888 C++: It is assumed that the front-end has taken care of
2889 all matters concerning pointers to members. A pointer
2890 to member which reaches here is considered to be equivalent
2891 to an INT (or some size). After all, it is only an offset. */
2892
2893 CORE_ADDR
2894 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2895 {
2896 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2897 whether we want this to be true eventually. */
2898 return unpack_long (type, valaddr);
2899 }
2900
2901 bool
2902 is_floating_value (struct value *val)
2903 {
2904 struct type *type = check_typedef (value_type (val));
2905
2906 if (is_floating_type (type))
2907 {
2908 if (!target_float_is_valid (value_contents (val), type))
2909 error (_("Invalid floating value found in program."));
2910 return true;
2911 }
2912
2913 return false;
2914 }
2915
2916 \f
2917 /* Get the value of the FIELDNO'th field (which must be static) of
2918 TYPE. */
2919
2920 struct value *
2921 value_static_field (struct type *type, int fieldno)
2922 {
2923 struct value *retval;
2924
2925 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2926 {
2927 case FIELD_LOC_KIND_PHYSADDR:
2928 retval = value_at_lazy (type->field (fieldno).type (),
2929 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2930 break;
2931 case FIELD_LOC_KIND_PHYSNAME:
2932 {
2933 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2934 /* TYPE_FIELD_NAME (type, fieldno); */
2935 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2936
2937 if (sym.symbol == NULL)
2938 {
2939 /* With some compilers, e.g. HP aCC, static data members are
2940 reported as non-debuggable symbols. */
2941 struct bound_minimal_symbol msym
2942 = lookup_minimal_symbol (phys_name, NULL, NULL);
2943 struct type *field_type = type->field (fieldno).type ();
2944
2945 if (!msym.minsym)
2946 retval = allocate_optimized_out_value (field_type);
2947 else
2948 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2949 }
2950 else
2951 retval = value_of_variable (sym.symbol, sym.block);
2952 break;
2953 }
2954 default:
2955 gdb_assert_not_reached ("unexpected field location kind");
2956 }
2957
2958 return retval;
2959 }
2960
2961 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2962 You have to be careful here, since the size of the data area for the value
2963 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2964 than the old enclosing type, you have to allocate more space for the
2965 data. */
2966
2967 void
2968 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2969 {
2970 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2971 {
2972 check_type_length_before_alloc (new_encl_type);
2973 val->contents
2974 .reset ((gdb_byte *) xrealloc (val->contents.release (),
2975 TYPE_LENGTH (new_encl_type)));
2976 }
2977
2978 val->enclosing_type = new_encl_type;
2979 }
2980
2981 /* Given a value ARG1 (offset by OFFSET bytes)
2982 of a struct or union type ARG_TYPE,
2983 extract and return the value of one of its (non-static) fields.
2984 FIELDNO says which field. */
2985
2986 struct value *
2987 value_primitive_field (struct value *arg1, LONGEST offset,
2988 int fieldno, struct type *arg_type)
2989 {
2990 struct value *v;
2991 struct type *type;
2992 struct gdbarch *arch = get_value_arch (arg1);
2993 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2994
2995 arg_type = check_typedef (arg_type);
2996 type = arg_type->field (fieldno).type ();
2997
2998 /* Call check_typedef on our type to make sure that, if TYPE
2999 is a TYPE_CODE_TYPEDEF, its length is set to the length
3000 of the target type instead of zero. However, we do not
3001 replace the typedef type by the target type, because we want
3002 to keep the typedef in order to be able to print the type
3003 description correctly. */
3004 check_typedef (type);
3005
3006 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3007 {
3008 /* Handle packed fields.
3009
3010 Create a new value for the bitfield, with bitpos and bitsize
3011 set. If possible, arrange offset and bitpos so that we can
3012 do a single aligned read of the size of the containing type.
3013 Otherwise, adjust offset to the byte containing the first
3014 bit. Assume that the address, offset, and embedded offset
3015 are sufficiently aligned. */
3016
3017 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3018 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3019
3020 v = allocate_value_lazy (type);
3021 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3022 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3023 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3024 v->bitpos = bitpos % container_bitsize;
3025 else
3026 v->bitpos = bitpos % 8;
3027 v->offset = (value_embedded_offset (arg1)
3028 + offset
3029 + (bitpos - v->bitpos) / 8);
3030 set_value_parent (v, arg1);
3031 if (!value_lazy (arg1))
3032 value_fetch_lazy (v);
3033 }
3034 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3035 {
3036 /* This field is actually a base subobject, so preserve the
3037 entire object's contents for later references to virtual
3038 bases, etc. */
3039 LONGEST boffset;
3040
3041 /* Lazy register values with offsets are not supported. */
3042 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3043 value_fetch_lazy (arg1);
3044
3045 /* We special case virtual inheritance here because this
3046 requires access to the contents, which we would rather avoid
3047 for references to ordinary fields of unavailable values. */
3048 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3049 boffset = baseclass_offset (arg_type, fieldno,
3050 value_contents (arg1),
3051 value_embedded_offset (arg1),
3052 value_address (arg1),
3053 arg1);
3054 else
3055 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3056
3057 if (value_lazy (arg1))
3058 v = allocate_value_lazy (value_enclosing_type (arg1));
3059 else
3060 {
3061 v = allocate_value (value_enclosing_type (arg1));
3062 value_contents_copy_raw (v, 0, arg1, 0,
3063 TYPE_LENGTH (value_enclosing_type (arg1)));
3064 }
3065 v->type = type;
3066 v->offset = value_offset (arg1);
3067 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3068 }
3069 else if (NULL != TYPE_DATA_LOCATION (type))
3070 {
3071 /* Field is a dynamic data member. */
3072
3073 gdb_assert (0 == offset);
3074 /* We expect an already resolved data location. */
3075 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3076 /* For dynamic data types defer memory allocation
3077 until we actually access the value. */
3078 v = allocate_value_lazy (type);
3079 }
3080 else
3081 {
3082 /* Plain old data member */
3083 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3084 / (HOST_CHAR_BIT * unit_size));
3085
3086 /* Lazy register values with offsets are not supported. */
3087 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3088 value_fetch_lazy (arg1);
3089
3090 if (value_lazy (arg1))
3091 v = allocate_value_lazy (type);
3092 else
3093 {
3094 v = allocate_value (type);
3095 value_contents_copy_raw (v, value_embedded_offset (v),
3096 arg1, value_embedded_offset (arg1) + offset,
3097 type_length_units (type));
3098 }
3099 v->offset = (value_offset (arg1) + offset
3100 + value_embedded_offset (arg1));
3101 }
3102 set_value_component_location (v, arg1);
3103 return v;
3104 }
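/* A worked example of the packed-field arrangement in
   value_primitive_field above (a sketch): for a bitfield whose
   declared type is a 4-byte int, with TYPE_FIELD_BITPOS 37 and
   bitsize 3, container_bitsize is 32 and TYPE_LENGTH fits in a
   LONGEST, so the aligned-read branch applies:

     v->bitpos == 37 % 32 == 5
     container byte offset == (37 - 5) / 8 == 4

   i.e. the bitfield is read via one aligned 4-byte container starting
   4 bytes into the parent and occupies bits 5..7 of that container.  */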
3105
3106 /* Given a value ARG1 of a struct or union type,
3107 extract and return the value of one of its (non-static) fields.
3108 FIELDNO says which field. */
3109
3110 struct value *
3111 value_field (struct value *arg1, int fieldno)
3112 {
3113 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3114 }
3115
3116 /* Return a non-virtual function as a value.
3117 F is the list of member functions which contains the desired method.
3118 J is an index into F which provides the desired method.
3119
3120 We only use the symbol for its address, so be happy with either a
3121 full symbol or a minimal symbol. */
3122
3123 struct value *
3124 value_fn_field (struct value **arg1p, struct fn_field *f,
3125 int j, struct type *type,
3126 LONGEST offset)
3127 {
3128 struct value *v;
3129 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3130 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3131 struct symbol *sym;
3132 struct bound_minimal_symbol msym;
3133
3134 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3135 if (sym != NULL)
3136 {
3137 memset (&msym, 0, sizeof (msym));
3138 }
3139 else
3140 {
3141 gdb_assert (sym == NULL);
3142 msym = lookup_bound_minimal_symbol (physname);
3143 if (msym.minsym == NULL)
3144 return NULL;
3145 }
3146
3147 v = allocate_value (ftype);
3148 VALUE_LVAL (v) = lval_memory;
3149 if (sym)
3150 {
3151 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3152 }
3153 else
3154 {
3155 /* The minimal symbol might point to a function descriptor;
3156 resolve it to the actual code address instead. */
3157 struct objfile *objfile = msym.objfile;
3158 struct gdbarch *gdbarch = objfile->arch ();
3159
3160 set_value_address (v,
3161 gdbarch_convert_from_func_ptr_addr
3162 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3163 }
3164
3165 if (arg1p)
3166 {
3167 if (type != value_type (*arg1p))
3168 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3169 value_addr (*arg1p)));
3170
3171 /* Move the `this' pointer according to the offset.
3172 VALUE_OFFSET (*arg1p) += offset; */
3173 }
3174
3175 return v;
3176 }
3177
3178 \f
3179
3180 /* See value.h. */
3181
3182 LONGEST
3183 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3184 LONGEST bitpos, LONGEST bitsize)
3185 {
3186 enum bfd_endian byte_order = type_byte_order (field_type);
3187 ULONGEST val;
3188 ULONGEST valmask;
3189 int lsbcount;
3190 LONGEST bytes_read;
3191 LONGEST read_offset;
3192
3193 /* Read the minimum number of bytes required; there may not be
3194 enough bytes to read an entire ULONGEST. */
3195 field_type = check_typedef (field_type);
3196 if (bitsize)
3197 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3198 else
3199 {
3200 bytes_read = TYPE_LENGTH (field_type);
3201 bitsize = 8 * bytes_read;
3202 }
3203
3204 read_offset = bitpos / 8;
3205
3206 val = extract_unsigned_integer (valaddr + read_offset,
3207 bytes_read, byte_order);
3208
3209 /* Extract bits. See comment above. */
3210
3211 if (byte_order == BFD_ENDIAN_BIG)
3212 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3213 else
3214 lsbcount = (bitpos % 8);
3215 val >>= lsbcount;
3216
3217 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3218 If the field is signed, and is negative, then sign extend. */
3219
3220 if (bitsize < 8 * (int) sizeof (val))
3221 {
3222 valmask = (((ULONGEST) 1) << bitsize) - 1;
3223 val &= valmask;
3224 if (!field_type->is_unsigned ())
3225 {
3226 if (val & (valmask ^ (valmask >> 1)))
3227 {
3228 val |= ~valmask;
3229 }
3230 }
3231 }
3232
3233 return val;
3234 }
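/* A worked example of unpack_bits_as_long above (a sketch,
   little-endian byte order, a 3-bit unsigned FIELD_TYPE starting at
   bit 10 of the object):

     valaddr[]  == { 0x00, 0x6c }            // 0x6c == 0b01101100
     bytes_read == ((10 % 8) + 3 + 7) / 8    // == 1, read valaddr[1]
     lsbcount   == 10 % 8                    // == 2
     result     == (0x6c >> 2) & 0x7         // == 3

   For a signed field whose top extracted bit is set, the value is
   sign-extended into the full LONGEST instead.  */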
3235
3236 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3237 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3238 ORIGINAL_VALUE, which must not be NULL. See
3239 unpack_value_bits_as_long for more details. */
3240
3241 int
3242 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3243 LONGEST embedded_offset, int fieldno,
3244 const struct value *val, LONGEST *result)
3245 {
3246 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3247 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3248 struct type *field_type = type->field (fieldno).type ();
3249 int bit_offset;
3250
3251 gdb_assert (val != NULL);
3252
3253 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3254 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3255 || !value_bits_available (val, bit_offset, bitsize))
3256 return 0;
3257
3258 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3259 bitpos, bitsize);
3260 return 1;
3261 }
3262
3263 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3264 object at VALADDR. See unpack_bits_as_long for more details. */
3265
3266 LONGEST
3267 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3268 {
3269 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3270 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3271 struct type *field_type = type->field (fieldno).type ();
3272
3273 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3274 }
3275
3276 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3277 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3278 the contents in DEST_VAL, zero or sign extending if the type of
3279 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3280 VAL. If the VAL's contents required to extract the bitfield from
3281 are unavailable/optimized out, DEST_VAL is correspondingly
3282 marked unavailable/optimized out. */
3283
3284 void
3285 unpack_value_bitfield (struct value *dest_val,
3286 LONGEST bitpos, LONGEST bitsize,
3287 const gdb_byte *valaddr, LONGEST embedded_offset,
3288 const struct value *val)
3289 {
3290 enum bfd_endian byte_order;
3291 int src_bit_offset;
3292 int dst_bit_offset;
3293 struct type *field_type = value_type (dest_val);
3294
3295 byte_order = type_byte_order (field_type);
3296
3297 /* First, unpack and sign extend the bitfield as if it was wholly
3298 valid. Optimized out/unavailable bits are read as zero, but
3299 that's OK, as they'll end up marked below. If the VAL is
3300 wholly-invalid we may have skipped allocating its contents,
3301 though. See allocate_optimized_out_value. */
3302 if (valaddr != NULL)
3303 {
3304 LONGEST num;
3305
3306 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3307 bitpos, bitsize);
3308 store_signed_integer (value_contents_raw (dest_val),
3309 TYPE_LENGTH (field_type), byte_order, num);
3310 }
3311
3312 /* Now copy the optimized out / unavailability ranges to the right
3313 bits. */
3314 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3315 if (byte_order == BFD_ENDIAN_BIG)
3316 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3317 else
3318 dst_bit_offset = 0;
3319 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3320 val, src_bit_offset, bitsize);
3321 }
3322
3323 /* Return a new value with type TYPE, which is FIELDNO field of the
3324 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3325 of VAL. If the VAL's contents required to extract the bitfield
3326 from are unavailable/optimized out, the new value is
3327 correspondingly marked unavailable/optimized out. */
3328
3329 struct value *
3330 value_field_bitfield (struct type *type, int fieldno,
3331 const gdb_byte *valaddr,
3332 LONGEST embedded_offset, const struct value *val)
3333 {
3334 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3335 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3336 struct value *res_val = allocate_value (type->field (fieldno).type ());
3337
3338 unpack_value_bitfield (res_val, bitpos, bitsize,
3339 valaddr, embedded_offset, val);
3340
3341 return res_val;
3342 }
3343
3344 /* Modify the value of a bitfield. ADDR points to a block of memory in
3345 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3346 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3347 indicate which bits (in target bit order) comprise the bitfield.
3348 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3349 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3350
3351 void
3352 modify_field (struct type *type, gdb_byte *addr,
3353 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3354 {
3355 enum bfd_endian byte_order = type_byte_order (type);
3356 ULONGEST oword;
3357 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3358 LONGEST bytesize;
3359
3360 /* Normalize BITPOS. */
3361 addr += bitpos / 8;
3362 bitpos %= 8;
3363
3364 /* If a negative fieldval fits in the field in question, chop
3365 off the sign extension bits. */
3366 if ((~fieldval & ~(mask >> 1)) == 0)
3367 fieldval &= mask;
3368
3369 /* Warn if value is too big to fit in the field in question. */
3370 if (0 != (fieldval & ~mask))
3371 {
3372 /* FIXME: would like to include fieldval in the message, but
3373 we don't have a sprintf_longest. */
3374 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3375
3376 /* Truncate it, otherwise adjoining fields may be corrupted. */
3377 fieldval &= mask;
3378 }
3379
3380 /* Ensure no bytes outside of the modified ones get accessed, as that
3381 may cause false valgrind reports. */
3382
3383 bytesize = (bitpos + bitsize + 7) / 8;
3384 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3385
3386 /* Shifting for bit field depends on endianness of the target machine. */
3387 if (byte_order == BFD_ENDIAN_BIG)
3388 bitpos = bytesize * 8 - bitpos - bitsize;
3389
3390 oword &= ~(mask << bitpos);
3391 oword |= fieldval << bitpos;
3392
3393 store_unsigned_integer (addr, bytesize, byte_order, oword);
3394 }
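/* A worked example of the read-modify-write in modify_field above
   (a sketch, little-endian byte order, writing FIELDVAL 5 into a
   3-bit field at BITPOS 10):

     addr  += 10 / 8;          // step to byte 1; bitpos becomes 2
     mask   = 0x7;
     oword  = 0x6c;            // previous contents of that byte
     oword &= ~(0x7 << 2);     // clear the field: 0x60
     oword |= 5 << 2;          // insert FIELDVAL: 0x74

   Only bits 2..4 of that single byte change; all surrounding bits and
   bytes are rewritten with their original contents.  */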
3395 \f
3396 /* Pack NUM into BUF using a target format of TYPE. */
3397
3398 void
3399 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3400 {
3401 enum bfd_endian byte_order = type_byte_order (type);
3402 LONGEST len;
3403
3404 type = check_typedef (type);
3405 len = TYPE_LENGTH (type);
3406
3407 switch (type->code ())
3408 {
3409 case TYPE_CODE_RANGE:
3410 num -= type->bounds ()->bias;
3411 /* Fall through. */
3412 case TYPE_CODE_INT:
3413 case TYPE_CODE_CHAR:
3414 case TYPE_CODE_ENUM:
3415 case TYPE_CODE_FLAGS:
3416 case TYPE_CODE_BOOL:
3417 case TYPE_CODE_MEMBERPTR:
3418 if (type->bit_size_differs_p ())
3419 {
3420 unsigned bit_off = type->bit_offset ();
3421 unsigned bit_size = type->bit_size ();
3422 num &= ((ULONGEST) 1 << bit_size) - 1;
3423 num <<= bit_off;
3424 }
3425 store_signed_integer (buf, len, byte_order, num);
3426 break;
3427
3428 case TYPE_CODE_REF:
3429 case TYPE_CODE_RVALUE_REF:
3430 case TYPE_CODE_PTR:
3431 store_typed_address (buf, type, (CORE_ADDR) num);
3432 break;
3433
3434 case TYPE_CODE_FLT:
3435 case TYPE_CODE_DECFLOAT:
3436 target_float_from_longest (buf, type, num);
3437 break;
3438
3439 default:
3440 error (_("Unexpected type (%d) encountered for integer constant."),
3441 type->code ());
3442 }
3443 }
3444
3445
3446 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3447
3448 static void
3449 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3450 {
3451 LONGEST len;
3452 enum bfd_endian byte_order;
3453
3454 type = check_typedef (type);
3455 len = TYPE_LENGTH (type);
3456 byte_order = type_byte_order (type);
3457
3458 switch (type->code ())
3459 {
3460 case TYPE_CODE_INT:
3461 case TYPE_CODE_CHAR:
3462 case TYPE_CODE_ENUM:
3463 case TYPE_CODE_FLAGS:
3464 case TYPE_CODE_BOOL:
3465 case TYPE_CODE_RANGE:
3466 case TYPE_CODE_MEMBERPTR:
3467 if (type->bit_size_differs_p ())
3468 {
3469 unsigned bit_off = type->bit_offset ();
3470 unsigned bit_size = type->bit_size ();
3471 num &= ((ULONGEST) 1 << bit_size) - 1;
3472 num <<= bit_off;
3473 }
3474 store_unsigned_integer (buf, len, byte_order, num);
3475 break;
3476
3477 case TYPE_CODE_REF:
3478 case TYPE_CODE_RVALUE_REF:
3479 case TYPE_CODE_PTR:
3480 store_typed_address (buf, type, (CORE_ADDR) num);
3481 break;
3482
3483 case TYPE_CODE_FLT:
3484 case TYPE_CODE_DECFLOAT:
3485 target_float_from_ulongest (buf, type, num);
3486 break;
3487
3488 default:
3489 error (_("Unexpected type (%d) encountered "
3490 "for unsigned integer constant."),
3491 type->code ());
3492 }
3493 }
3494
3495
3496 /* Convert C numbers into newly allocated values. */
3497
3498 struct value *
3499 value_from_longest (struct type *type, LONGEST num)
3500 {
3501 struct value *val = allocate_value (type);
3502
3503 pack_long (value_contents_raw (val), type, num);
3504 return val;
3505 }
3506
3507
3508 /* Convert C unsigned numbers into newly allocated values. */
3509
3510 struct value *
3511 value_from_ulongest (struct type *type, ULONGEST num)
3512 {
3513 struct value *val = allocate_value (type);
3514
3515 pack_unsigned_long (value_contents_raw (val), type, num);
3516
3517 return val;
3518 }
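/* For example, a caller holding a gdbarch can build constant values
   with the two functions above like this (a sketch; the types come
   from the usual builtin_type tables):

     struct value *minus_seven
       = value_from_longest (builtin_type (gdbarch)->builtin_int, -7);
     struct value *big
       = value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_long,
                              0xdeadbeefUL);

   Both values are not_lval and independent of the inferior.  */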
3519
3520
3521 /* Create a value representing a pointer of type TYPE to the address
3522 ADDR. */
3523
3524 struct value *
3525 value_from_pointer (struct type *type, CORE_ADDR addr)
3526 {
3527 struct value *val = allocate_value (type);
3528
3529 store_typed_address (value_contents_raw (val),
3530 check_typedef (type), addr);
3531 return val;
3532 }
3533
3534 /* Create and return a value object of TYPE containing the value D. The
3535 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3536 it is converted to target format. */
3537
3538 struct value *
3539 value_from_host_double (struct type *type, double d)
3540 {
3541 struct value *value = allocate_value (type);
3542 gdb_assert (type->code () == TYPE_CODE_FLT);
3543 target_float_from_host_double (value_contents_raw (value),
3544 value_type (value), d);
3545 return value;
3546 }
3547
3548 /* Create a value of type TYPE whose contents come from VALADDR, if it
3549 is non-null, and whose memory address (in the inferior) is
3550 ADDRESS. The type of the created value may differ from the passed
3551 type TYPE. Make sure to retrieve the value's new type after this call.
3552 Note that TYPE is not passed through resolve_dynamic_type; this is
3553 a special API intended for use only by Ada. */
3554
3555 struct value *
3556 value_from_contents_and_address_unresolved (struct type *type,
3557 const gdb_byte *valaddr,
3558 CORE_ADDR address)
3559 {
3560 struct value *v;
3561
3562 if (valaddr == NULL)
3563 v = allocate_value_lazy (type);
3564 else
3565 v = value_from_contents (type, valaddr);
3566 VALUE_LVAL (v) = lval_memory;
3567 set_value_address (v, address);
3568 return v;
3569 }
3570
3571 /* Create a value of type TYPE whose contents come from VALADDR, if it
3572 is non-null, and whose memory address (in the inferior) is
3573 ADDRESS. The type of the created value may differ from the passed
3574 type TYPE. Make sure to retrieve the value's new type after this call. */
3575
3576 struct value *
3577 value_from_contents_and_address (struct type *type,
3578 const gdb_byte *valaddr,
3579 CORE_ADDR address)
3580 {
3581 gdb::array_view<const gdb_byte> view;
3582 if (valaddr != nullptr)
3583 view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3584 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3585 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3586 struct value *v;
3587
3588 if (valaddr == NULL)
3589 v = allocate_value_lazy (resolved_type);
3590 else
3591 v = value_from_contents (resolved_type, valaddr);
3592 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3593 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3594 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3595 VALUE_LVAL (v) = lval_memory;
3596 set_value_address (v, address);
3597 return v;
3598 }
3599
3600 /* Create a value of type TYPE holding the contents CONTENTS.
3601 The new value is `not_lval'. */
3602
3603 struct value *
3604 value_from_contents (struct type *type, const gdb_byte *contents)
3605 {
3606 struct value *result;
3607
3608 result = allocate_value (type);
3609 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3610 return result;
3611 }
3612
3613 /* Extract a value from the value history. Input will be of the form
3614 $digits or $$digits. See block comment above 'write_dollar_variable'
3615 for details. */
3616
3617 struct value *
3618 value_from_history_ref (const char *h, const char **endp)
3619 {
3620 int index, len;
3621
3622 if (h[0] == '$')
3623 len = 1;
3624 else
3625 return NULL;
3626
3627 if (h[1] == '$')
3628 len = 2;
3629
3630 /* Find length of numeral string. */
3631 for (; isdigit (h[len]); len++)
3632 ;
3633
3634 /* Make sure numeral string is not part of an identifier. */
3635 if (h[len] == '_' || isalpha (h[len]))
3636 return NULL;
3637
3638 /* Now collect the index value. */
3639 if (h[1] == '$')
3640 {
3641 if (len == 2)
3642 {
3643 /* For some bizarre reason, "$$" is equivalent to "$$1",
3644 rather than to "$$0" as it ought to be! */
3645 index = -1;
3646 *endp += len;
3647 }
3648 else
3649 {
3650 char *local_end;
3651
3652 index = -strtol (&h[2], &local_end, 10);
3653 *endp = local_end;
3654 }
3655 }
3656 else
3657 {
3658 if (len == 1)
3659 {
3660 /* "$" is equivalent to "$0". */
3661 index = 0;
3662 *endp += len;
3663 }
3664 else
3665 {
3666 char *local_end;
3667
3668 index = strtol (&h[1], &local_end, 10);
3669 *endp = local_end;
3670 }
3671 }
3672
3673 return access_value_history (index);
3674 }
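/* Examples of the forms accepted by value_from_history_ref above
   (a sketch):

     "$"      ->  access_value_history (0)     the last value
     "$$"     ->  access_value_history (-1)    the value before the last
     "$7"     ->  access_value_history (7)     absolute entry $7
     "$$3"    ->  access_value_history (-3)    three entries before $

   Anything else, e.g. "$pc" or "$foo", returns NULL so the caller can
   treat it as a register or convenience variable reference instead.  */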
3675
3676 /* Get the component value (offset by OFFSET bytes) of a struct or
3677 union WHOLE. Component's type is TYPE. */
3678
3679 struct value *
3680 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3681 {
3682 struct value *v;
3683
3684 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3685 v = allocate_value_lazy (type);
3686 else
3687 {
3688 v = allocate_value (type);
3689 value_contents_copy (v, value_embedded_offset (v),
3690 whole, value_embedded_offset (whole) + offset,
3691 type_length_units (type));
3692 }
3693 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3694 set_value_component_location (v, whole);
3695
3696 return v;
3697 }
3698
3699 struct value *
3700 coerce_ref_if_computed (const struct value *arg)
3701 {
3702 const struct lval_funcs *funcs;
3703
3704 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3705 return NULL;
3706
3707 if (value_lval_const (arg) != lval_computed)
3708 return NULL;
3709
3710 funcs = value_computed_funcs (arg);
3711 if (funcs->coerce_ref == NULL)
3712 return NULL;
3713
3714 return funcs->coerce_ref (arg);
3715 }
3716
3717 /* Look at value.h for description. */
3718
3719 struct value *
3720 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3721 const struct type *original_type,
3722 struct value *original_value,
3723 CORE_ADDR original_value_address)
3724 {
3725 gdb_assert (original_type->code () == TYPE_CODE_PTR
3726 || TYPE_IS_REFERENCE (original_type));
3727
3728 struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3729 gdb::array_view<const gdb_byte> view;
3730 struct type *resolved_original_target_type
3731 = resolve_dynamic_type (original_target_type, view,
3732 original_value_address);
3733
3734 /* Re-adjust type. */
3735 deprecated_set_value_type (value, resolved_original_target_type);
3736
3737 /* Add embedding info. */
3738 set_value_enclosing_type (value, enc_type);
3739 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3740
3741 /* We may be pointing to an object of some derived type. */
3742 return value_full_object (value, NULL, 0, 0, 0);
3743 }
3744
3745 struct value *
3746 coerce_ref (struct value *arg)
3747 {
3748 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3749 struct value *retval;
3750 struct type *enc_type;
3751
3752 retval = coerce_ref_if_computed (arg);
3753 if (retval)
3754 return retval;
3755
3756 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3757 return arg;
3758
3759 enc_type = check_typedef (value_enclosing_type (arg));
3760 enc_type = TYPE_TARGET_TYPE (enc_type);
3761
3762 CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg));
3763 retval = value_at_lazy (enc_type, addr);
3764 enc_type = value_type (retval);
3765 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3766 arg, addr);
3767 }
3768
3769 struct value *
3770 coerce_array (struct value *arg)
3771 {
3772 struct type *type;
3773
3774 arg = coerce_ref (arg);
3775 type = check_typedef (value_type (arg));
3776
3777 switch (type->code ())
3778 {
3779 case TYPE_CODE_ARRAY:
3780 if (!type->is_vector () && current_language->c_style_arrays_p ())
3781 arg = value_coerce_array (arg);
3782 break;
3783 case TYPE_CODE_FUNC:
3784 arg = value_coerce_function (arg);
3785 break;
3786 }
3787 return arg;
3788 }
3789 \f
3790
3791 /* Return the return value convention that will be used for the
3792 specified type. */
3793
3794 enum return_value_convention
3795 struct_return_convention (struct gdbarch *gdbarch,
3796 struct value *function, struct type *value_type)
3797 {
3798 enum type_code code = value_type->code ();
3799
3800 if (code == TYPE_CODE_ERROR)
3801 error (_("Function return type unknown."));
3802
3803 /* Probe the architecture for the return-value convention. */
3804 return gdbarch_return_value (gdbarch, function, value_type,
3805 NULL, NULL, NULL);
3806 }
3807
3808 /* Return true if the function returning the specified type is using
3809 the convention of returning structures in memory (passing in the
3810 address as a hidden first parameter). */
3811
3812 int
3813 using_struct_return (struct gdbarch *gdbarch,
3814 struct value *function, struct type *value_type)
3815 {
3816 if (value_type->code () == TYPE_CODE_VOID)
3817 /* A void return value is never in memory. See also corresponding
3818 code in "print_return_value". */
3819 return 0;
3820
3821 return (struct_return_convention (gdbarch, function, value_type)
3822 != RETURN_VALUE_REGISTER_CONVENTION);
3823 }
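
/* Editorial sketch, not part of the original source: the typical
   caller pattern for the two routines above, roughly what the
   inferior-call code does when preparing a call.  FUNCTION and VALTYPE
   are assumptions of this example:

     if (using_struct_return (gdbarch, function, valtype))
       {
         /* The ABI hands the result back in memory: reserve space for
            it and pass that address as a hidden first argument.  */
       }
     else
       {
         /* The result comes back in registers and gdbarch_return_value
            can extract it after the call.  */
       }
 */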
3824
3825 /* Set the initialized field in a value struct. */
3826
3827 void
3828 set_value_initialized (struct value *val, int status)
3829 {
3830 val->initialized = status;
3831 }
3832
3833 /* Return the initialized field in a value struct. */
3834
3835 int
3836 value_initialized (const struct value *val)
3837 {
3838 return val->initialized;
3839 }
3840
3841 /* Helper for value_fetch_lazy when the value is a bitfield. */
3842
3843 static void
3844 value_fetch_lazy_bitfield (struct value *val)
3845 {
3846 gdb_assert (value_bitsize (val) != 0);
3847
3848 /* To read a lazy bitfield, read the entire enclosing value. This
3849 prevents reading the same block of (possibly volatile) memory once
3850 per bitfield. It would be even better to read only the containing
3851 word, but we have no way to record that just specific bits of a
3852 value have been fetched. */
3853 struct value *parent = value_parent (val);
3854
3855 if (value_lazy (parent))
3856 value_fetch_lazy (parent);
3857
3858 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3859 value_contents_for_printing (parent),
3860 value_offset (val), parent);
3861 }
3862
3863 /* Helper for value_fetch_lazy when the value is in memory. */
3864
3865 static void
3866 value_fetch_lazy_memory (struct value *val)
3867 {
3868 gdb_assert (VALUE_LVAL (val) == lval_memory);
3869
3870 CORE_ADDR addr = value_address (val);
3871 struct type *type = check_typedef (value_enclosing_type (val));
3872
3873 if (TYPE_LENGTH (type))
3874 read_value_memory (val, 0, value_stack (val),
3875 addr, value_contents_all_raw (val),
3876 type_length_units (type));
3877 }
3878
3879 /* Helper for value_fetch_lazy when the value is in a register. */
3880
3881 static void
3882 value_fetch_lazy_register (struct value *val)
3883 {
3884 struct frame_info *next_frame;
3885 int regnum;
3886 struct type *type = check_typedef (value_type (val));
3887 struct value *new_val = val, *mark = value_mark ();
3888
3889 /* Offsets are not supported here; lazy register values must
3890 refer to the entire register. */
3891 gdb_assert (value_offset (val) == 0);
3892
3893 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3894 {
3895 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3896
3897 next_frame = frame_find_by_id (next_frame_id);
3898 regnum = VALUE_REGNUM (new_val);
3899
3900 gdb_assert (next_frame != NULL);
3901
3902 /* Convertible register routines are used for multi-register
3903 values and for interpretation in different types
3904 (e.g. float or int from a double register). Lazy
3905 register values should have the register's natural type,
3906 so they do not apply. */
3907 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3908 regnum, type));
3909
3910 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3911 Since a "->next" operation was performed when setting
3912 this field, we do not need to perform a "next" operation
3913 again when unwinding the register. That's why
3914 frame_unwind_register_value() is called here instead of
3915 get_frame_register_value(). */
3916 new_val = frame_unwind_register_value (next_frame, regnum);
3917
3918 /* If we get another lazy lval_register value, it means the
3919 register is found by reading it from NEXT_FRAME's next frame.
3920 frame_unwind_register_value should never return a value with
3921 the frame id pointing to NEXT_FRAME. If it does, it means we
3922 either have two consecutive frames with the same frame id
3923 in the frame chain, or some code is trying to unwind
3924 behind get_prev_frame's back (e.g., a frame unwind
3925 sniffer trying to unwind), bypassing its validations. In
3926 any case, it should always be an internal error to end up
3927 in this situation. */
3928 if (VALUE_LVAL (new_val) == lval_register
3929 && value_lazy (new_val)
3930 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3931 internal_error (__FILE__, __LINE__,
3932 _("infinite loop while fetching a register"));
3933 }
3934
3935 /* If it's still lazy (for instance, a saved register on the
3936 stack), fetch it. */
3937 if (value_lazy (new_val))
3938 value_fetch_lazy (new_val);
3939
3940 /* Copy the contents and the unavailability/optimized-out
3941 meta-data from NEW_VAL to VAL. */
3942 set_value_lazy (val, 0);
3943 value_contents_copy (val, value_embedded_offset (val),
3944 new_val, value_embedded_offset (new_val),
3945 type_length_units (type));
3946
3947 if (frame_debug)
3948 {
3949 struct gdbarch *gdbarch;
3950 struct frame_info *frame;
3951 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3952 so that the frame level will be shown correctly. */
3953 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3954 regnum = VALUE_REGNUM (val);
3955 gdbarch = get_frame_arch (frame);
3956
3957 fprintf_unfiltered (gdb_stdlog,
3958 "{ value_fetch_lazy "
3959 "(frame=%d,regnum=%d(%s),...) ",
3960 frame_relative_level (frame), regnum,
3961 user_reg_map_regnum_to_name (gdbarch, regnum));
3962
3963 fprintf_unfiltered (gdb_stdlog, "->");
3964 if (value_optimized_out (new_val))
3965 {
3966 fprintf_unfiltered (gdb_stdlog, " ");
3967 val_print_optimized_out (new_val, gdb_stdlog);
3968 }
3969 else
3970 {
3971 int i;
3972 const gdb_byte *buf = value_contents (new_val);
3973
3974 if (VALUE_LVAL (new_val) == lval_register)
3975 fprintf_unfiltered (gdb_stdlog, " register=%d",
3976 VALUE_REGNUM (new_val));
3977 else if (VALUE_LVAL (new_val) == lval_memory)
3978 fprintf_unfiltered (gdb_stdlog, " address=%s",
3979 paddress (gdbarch,
3980 value_address (new_val)));
3981 else
3982 fprintf_unfiltered (gdb_stdlog, " computed");
3983
3984 fprintf_unfiltered (gdb_stdlog, " bytes=");
3985 fprintf_unfiltered (gdb_stdlog, "[");
3986 for (i = 0; i < register_size (gdbarch, regnum); i++)
3987 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3988 fprintf_unfiltered (gdb_stdlog, "]");
3989 }
3990
3991 fprintf_unfiltered (gdb_stdlog, " }\n");
3992 }
3993
3994 /* Dispose of the intermediate values. This prevents
3995 watchpoints from trying to watch the saved frame pointer. */
3996 value_free_to_mark (mark);
3997 }
3998
3999 /* Load the actual content of a lazy value. Fetch the data from the
4000 user's process and clear the lazy flag to indicate that the data in
4001 the buffer is valid.
4002
4003 If the value is zero-length, we avoid calling read_memory, which
4004 would abort. We mark the value as fetched anyway -- all 0 bytes of
4005 it. */
4006
4007 void
4008 value_fetch_lazy (struct value *val)
4009 {
4010 gdb_assert (value_lazy (val));
4011 allocate_value_contents (val);
4012 /* A value is either lazy, or fully fetched. The
4013 availability/validity is only established as we try to fetch a
4014 value. */
4015 gdb_assert (val->optimized_out.empty ());
4016 gdb_assert (val->unavailable.empty ());
4017 if (value_bitsize (val))
4018 value_fetch_lazy_bitfield (val);
4019 else if (VALUE_LVAL (val) == lval_memory)
4020 value_fetch_lazy_memory (val);
4021 else if (VALUE_LVAL (val) == lval_register)
4022 value_fetch_lazy_register (val);
4023 else if (VALUE_LVAL (val) == lval_computed
4024 && value_computed_funcs (val)->read != NULL)
4025 value_computed_funcs (val)->read (val);
4026 else
4027 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4028
4029 set_value_lazy (val, 0);
4030 }
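
/* Editorial sketch, not part of the original source: the usual
   lazy-value life cycle that ends in the function above.  TYPE and
   ADDR are assumptions of this example:

     struct value *v = value_at_lazy (type, addr); /* no target access yet */
     if (value_lazy (v))
       value_fetch_lazy (v);                       /* reads the inferior now */

   Callers rarely need the explicit fetch; value_contents and friends
   fetch lazy values on demand.  */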
4031
4032 /* Implementation of the convenience function $_isvoid. */
4033
4034 static struct value *
4035 isvoid_internal_fn (struct gdbarch *gdbarch,
4036 const struct language_defn *language,
4037 void *cookie, int argc, struct value **argv)
4038 {
4039 int ret;
4040
4041 if (argc != 1)
4042 error (_("You must provide one argument for $_isvoid."));
4043
4044 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4045
4046 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4047 }
4048
4049 /* Implementation of the convenience function $_creal. Extracts the
4050 real part from a complex number. */
4051
4052 static struct value *
4053 creal_internal_fn (struct gdbarch *gdbarch,
4054 const struct language_defn *language,
4055 void *cookie, int argc, struct value **argv)
4056 {
4057 if (argc != 1)
4058 error (_("You must provide one argument for $_creal."));
4059
4060 value *cval = argv[0];
4061 type *ctype = check_typedef (value_type (cval));
4062 if (ctype->code () != TYPE_CODE_COMPLEX)
4063 error (_("expected a complex number"));
4064 return value_real_part (cval);
4065 }
4066
4067 /* Implementation of the convenience function $_cimag. Extracts the
4068 imaginary part from a complex number. */
4069
4070 static struct value *
4071 cimag_internal_fn (struct gdbarch *gdbarch,
4072 const struct language_defn *language,
4073 void *cookie, int argc,
4074 struct value **argv)
4075 {
4076 if (argc != 1)
4077 error (_("You must provide one argument for $_cimag."));
4078
4079 value *cval = argv[0];
4080 type *ctype = check_typedef (value_type (cval));
4081 if (ctype->code () != TYPE_CODE_COMPLEX)
4082 error (_("expected a complex number"));
4083 return value_imaginary_part (cval);
4084 }
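
/* Editorial sketch, not part of the original source: the three
   internal functions above become user-visible once registered with
   add_internal_function in _initialize_values below, e.g.

     (gdb) print $_isvoid ($some_unset_convenience_var)
     $1 = 1

   The convenience-variable name and the "$1 = 1" output are
   illustrative only; an unset convenience variable has void type, so
   $_isvoid reports 1 for it.  */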
4085
4086 #if GDB_SELF_TEST
4087 namespace selftests
4088 {
4089
4090 /* Test the ranges_contain function. */
4091
4092 static void
4093 test_ranges_contain ()
4094 {
4095 std::vector<range> ranges;
4096 range r;
4097
4098 /* [10, 14] */
4099 r.offset = 10;
4100 r.length = 5;
4101 ranges.push_back (r);
4102
4103 /* [20, 24] */
4104 r.offset = 20;
4105 r.length = 5;
4106 ranges.push_back (r);
4107
4108 /* [2, 6] */
4109 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4110 /* [9, 13] */
4111 SELF_CHECK (ranges_contain (ranges, 9, 5));
4112 /* [10, 11] */
4113 SELF_CHECK (ranges_contain (ranges, 10, 2));
4114 /* [10, 14] */
4115 SELF_CHECK (ranges_contain (ranges, 10, 5));
4116 /* [13, 18] */
4117 SELF_CHECK (ranges_contain (ranges, 13, 6));
4118 /* [14, 18] */
4119 SELF_CHECK (ranges_contain (ranges, 14, 5));
4120 /* [15, 18] */
4121 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4122 /* [16, 19] */
4123 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4124 /* [16, 21] */
4125 SELF_CHECK (ranges_contain (ranges, 16, 6));
4126 /* [21, 21] */
4127 SELF_CHECK (ranges_contain (ranges, 21, 1));
4128 /* [21, 25] */
4129 SELF_CHECK (ranges_contain (ranges, 21, 5));
4130 /* [26, 28] */
4131 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4132 }
4133
4134 /* Check that RANGES contains the same ranges as EXPECTED. */
4135
4136 static bool
4137 check_ranges_vector (gdb::array_view<const range> ranges,
4138 gdb::array_view<const range> expected)
4139 {
4140 return ranges == expected;
4141 }
4142
4143 /* Test the insert_into_bit_range_vector function. */
4144
4145 static void
4146 test_insert_into_bit_range_vector ()
4147 {
4148 std::vector<range> ranges;
4149
4150 /* [10, 14] */
4151 {
4152 insert_into_bit_range_vector (&ranges, 10, 5);
4153 static const range expected[] = {
4154 {10, 5}
4155 };
4156 SELF_CHECK (check_ranges_vector (ranges, expected));
4157 }
4158
4159 /* [10, 14] */
4160 {
4161 insert_into_bit_range_vector (&ranges, 11, 4);
4162 static const range expected = {10, 5};
4163 SELF_CHECK (check_ranges_vector (ranges, expected));
4164 }
4165
4166 /* [10, 14] [20, 24] */
4167 {
4168 insert_into_bit_range_vector (&ranges, 20, 5);
4169 static const range expected[] = {
4170 {10, 5},
4171 {20, 5},
4172 };
4173 SELF_CHECK (check_ranges_vector (ranges, expected));
4174 }
4175
4176 /* [10, 14] [17, 24] */
4177 {
4178 insert_into_bit_range_vector (&ranges, 17, 5);
4179 static const range expected[] = {
4180 {10, 5},
4181 {17, 8},
4182 };
4183 SELF_CHECK (check_ranges_vector (ranges, expected));
4184 }
4185
4186 /* [2, 8] [10, 14] [17, 24] */
4187 {
4188 insert_into_bit_range_vector (&ranges, 2, 7);
4189 static const range expected[] = {
4190 {2, 7},
4191 {10, 5},
4192 {17, 8},
4193 };
4194 SELF_CHECK (check_ranges_vector (ranges, expected));
4195 }
4196
4197 /* [2, 14] [17, 24] */
4198 {
4199 insert_into_bit_range_vector (&ranges, 9, 1);
4200 static const range expected[] = {
4201 {2, 13},
4202 {17, 8},
4203 };
4204 SELF_CHECK (check_ranges_vector (ranges, expected));
4205 }
4206
4207 /* [2, 14] [17, 24] */
4208 {
4209 insert_into_bit_range_vector (&ranges, 9, 1);
4210 static const range expected[] = {
4211 {2, 13},
4212 {17, 8},
4213 };
4214 SELF_CHECK (check_ranges_vector (ranges, expected));
4215 }
4216
4217 /* [2, 33] */
4218 {
4219 insert_into_bit_range_vector (&ranges, 4, 30);
4220 static const range expected = {2, 32};
4221 SELF_CHECK (check_ranges_vector (ranges, expected));
4222 }
4223 }
4224
4225 } /* namespace selftests */
4226 #endif /* GDB_SELF_TEST */
4227
4228 void _initialize_values ();
4229 void
4230 _initialize_values ()
4231 {
4232 add_cmd ("convenience", no_class, show_convenience, _("\
4233 Debugger convenience (\"$foo\") variables and functions.\n\
4234 Convenience variables are created when you assign them values;\n\
4235 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4236 \n\
4237 A few convenience variables are given values automatically:\n\
4238 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4239 \"$__\" holds the contents of the last address examined with \"x\"."
4240 #ifdef HAVE_PYTHON
4241 "\n\n\
4242 Convenience functions are defined via the Python API."
4243 #endif
4244 ), &showlist);
4245 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4246
4247 add_cmd ("values", no_set_class, show_values, _("\
4248 Elements of value history around item number IDX (or last ten)."),
4249 &showlist);
4250
4251 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4252 Initialize a convenience variable if necessary.\n\
4253 init-if-undefined VARIABLE = EXPRESSION\n\
4254 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4255 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4256 VARIABLE is already initialized."));
4257
4258 add_prefix_cmd ("function", no_class, function_command, _("\
4259 Placeholder command for showing help on convenience functions."),
4260 &functionlist, "function ", 0, &cmdlist);
4261
4262 add_internal_function ("_isvoid", _("\
4263 Check whether an expression is void.\n\
4264 Usage: $_isvoid (expression)\n\
4265 Return 1 if the expression is void, zero otherwise."),
4266 isvoid_internal_fn, NULL);
4267
4268 add_internal_function ("_creal", _("\
4269 Extract the real part of a complex number.\n\
4270 Usage: $_creal (expression)\n\
4271 Return the real part of a complex number; its type depends on the\n\
4272 type of the complex number."),
4273 creal_internal_fn, NULL);
4274
4275 add_internal_function ("_cimag", _("\
4276 Extract the imaginary part of a complex number.\n\
4277 Usage: $_cimag (expression)\n\
4278 Return the imaginary part of a complex number; its type depends on the\n\
4279 type of the complex number."),
4280 cimag_internal_fn, NULL);
4281
4282 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4283 class_support, &max_value_size, _("\
4284 Set maximum size of a value gdb will load from the inferior."), _("\
4285 Show maximum size of a value gdb will load from the inferior."), _("\
4286 Use this to control the maximum size, in bytes, of a value that gdb\n\
4287 will load from the inferior. Setting this value to 'unlimited'\n\
4288 disables checking.\n\
4289 Setting this does not invalidate already allocated values; it only\n\
4290 prevents future values larger than this size from being allocated."),
4291 set_max_value_size,
4292 show_max_value_size,
4293 &setlist, &showlist);
4294 #if GDB_SELF_TEST
4295 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4296 selftests::register_test ("insert_into_bit_range_vector",
4297 selftests::test_insert_into_bit_range_vector);
4298 #endif
4299 }
4300
4301 /* See value.h. */
4302
4303 void
4304 finalize_values ()
4305 {
4306 all_values.clear ();
4307 }