1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47
48 /* Definition of a user function. */
49 struct internal_function
50 {
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
54 char *name;
55
56 /* The handler. */
57 internal_function_fn handler;
58
59 /* User data for the handler. */
60 void *cookie;
61 };
62
63 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
64
65 struct range
66 {
67 /* Lowest offset in the range. */
68 LONGEST offset;
69
70 /* Length of the range. */
71 LONGEST length;
72
73 /* Returns true if THIS is strictly less than OTHER, useful for
74 searching. We keep ranges sorted by offset and coalesce
75 overlapping and contiguous ranges, so this just compares the
76 starting offset. */
77
78 bool operator< (const range &other) const
79 {
80 return offset < other.offset;
81 }
82
83 /* Returns true if THIS is equal to OTHER. */
84 bool operator== (const range &other) const
85 {
86 return offset == other.offset && length == other.length;
87 }
88 };
89
90 /* Returns true if the ranges defined by [offset1, offset1+len1) and
91 [offset2, offset2+len2) overlap. */
92
93 static int
94 ranges_overlap (LONGEST offset1, LONGEST len1,
95 LONGEST offset2, LONGEST len2)
96 {
97 ULONGEST h, l;
98
99 l = std::max (offset1, offset2);
100 h = std::min (offset1 + len1, offset2 + len2);
101 return (l < h);
102 }
103
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105 OFFSET+LENGTH). */
106
107 static int
108 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
109 LONGEST length)
110 {
111 range what;
112
113 what.offset = offset;
114 what.length = length;
115
116 /* We keep ranges sorted by offset and coalesce overlapping and
117 contiguous ranges, so to check if a range list contains a given
118 range, we can do a binary search for the position the given range
119 would be inserted if we only considered the starting OFFSET of
120 ranges. We call that position I. Since we also have LENGTH to
121 care for (this is a range after all), we need to check if the
122 _previous_ range overlaps the I range. E.g.,
123
124 R
125 |---|
126 |---| |---| |------| ... |--|
127 0 1 2 N
128
129 I=1
130
131 In the case above, the binary search would return `I=1', meaning,
132 this OFFSET should be inserted at position 1, and the current
133 position 1 should be pushed further (and become 2). But, `0'
134 overlaps with R.
135
136 Then we need to check whether R overlaps the range at position I itself.
137 E.g.,
138
139 R
140 |---|
141 |---| |---| |-------| ... |--|
142 0 1 2 N
143
144 I=1
145 */
146
147
148 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
149
150 if (i > ranges.begin ())
151 {
152 const struct range &bef = *(i - 1);
153
154 if (ranges_overlap (bef.offset, bef.length, offset, length))
155 return 1;
156 }
157
158 if (i < ranges.end ())
159 {
160 const struct range &r = *i;
161
162 if (ranges_overlap (r.offset, r.length, offset, length))
163 return 1;
164 }
165
166 return 0;
167 }
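
/* Illustrative example only: a throw-away sketch of how the range
   primitives above behave. Nothing in GDB calls this function and its
   name and sample ranges are made up. RANGES must already be sorted
   by offset and coalesced, which is the invariant ranges_contain
   relies on. */

static void
example_ranges_contain_usage (void)
{
  std::vector<range> ranges;
  range r;

  /* Build the sorted, coalesced list { [0, 4), [10, 14) }. */
  r.offset = 0;
  r.length = 4;
  ranges.push_back (r);
  r.offset = 10;
  r.length = 4;
  ranges.push_back (r);

  /* [2, 6) overlaps the first range. */
  gdb_assert (ranges_contain (ranges, 2, 4));

  /* [4, 10) falls entirely in the gap between the two ranges. */
  gdb_assert (!ranges_contain (ranges, 4, 6));

  /* [8, 12) overlaps the second range, found via the binary search
     described in the comment above. */
  gdb_assert (ranges_contain (ranges, 8, 4));
}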
168
169 static struct cmd_list_element *functionlist;
170
171 /* Note that the fields in this structure are arranged to save a bit
172 of memory. */
173
174 struct value
175 {
176 explicit value (struct type *type_)
177 : modifiable (1),
178 lazy (1),
179 initialized (1),
180 stack (0),
181 type (type_),
182 enclosing_type (type_)
183 {
184 }
185
186 ~value ()
187 {
188 if (VALUE_LVAL (this) == lval_computed)
189 {
190 const struct lval_funcs *funcs = location.computed.funcs;
191
192 if (funcs->free_closure)
193 funcs->free_closure (this);
194 }
195 else if (VALUE_LVAL (this) == lval_xcallable)
196 delete location.xm_worker;
197 }
198
199 DISABLE_COPY_AND_ASSIGN (value);
200
201 /* Type of value; either not an lval, or one of the various
202 different possible kinds of lval. */
203 enum lval_type lval = not_lval;
204
205 /* Is it modifiable? Only relevant if lval != not_lval. */
206 unsigned int modifiable : 1;
207
208 /* If zero, contents of this value are in the contents field. If
209 nonzero, contents are in inferior. If the lval field is lval_memory,
210 the contents are in inferior memory at location.address plus offset.
211 The lval field may also be lval_register.
212
213 WARNING: This field is used by the code which handles watchpoints
214 (see breakpoint.c) to decide whether a particular value can be
215 watched by hardware watchpoints. If the lazy flag is set for
216 some member of a value chain, it is assumed that this member of
217 the chain doesn't need to be watched as part of watching the
218 value itself. This is how GDB avoids watching the entire struct
219 or array when the user wants to watch a single struct member or
220 array element. If you ever change the way lazy flag is set and
221 reset, be sure to consider this use as well! */
222 unsigned int lazy : 1;
223
224 /* If value is a variable, is it initialized or not. */
225 unsigned int initialized : 1;
226
227 /* If value is from the stack. If this is set, read_stack will be
228 used instead of read_memory to enable extra caching. */
229 unsigned int stack : 1;
230
231 /* Location of value (if lval). */
232 union
233 {
234 /* If lval == lval_memory, this is the address in the inferior. */
235 CORE_ADDR address;
236
237 /* If lval == lval_register, the value is from a register. */
238 struct
239 {
240 /* Register number. */
241 int regnum;
242 /* Frame ID of "next" frame to which a register value is relative.
243 If the register value is found relative to frame F, then the
244 frame id of F->next will be stored in next_frame_id. */
245 struct frame_id next_frame_id;
246 } reg;
247
248 /* Pointer to internal variable. */
249 struct internalvar *internalvar;
250
251 /* Pointer to xmethod worker. */
252 struct xmethod_worker *xm_worker;
253
254 /* If lval == lval_computed, this is a set of function pointers
255 to use to access and describe the value, and a closure pointer
256 for them to use. */
257 struct
258 {
259 /* Functions to call. */
260 const struct lval_funcs *funcs;
261
262 /* Closure for those functions to use. */
263 void *closure;
264 } computed;
265 } location {};
266
267 /* Describes offset of a value within lval of a structure in target
268 addressable memory units. Note also the member embedded_offset
269 below. */
270 LONGEST offset = 0;
271
272 /* Only used for bitfields; number of bits contained in them. */
273 LONGEST bitsize = 0;
274
275 /* Only used for bitfields; position of start of field. For
276 little-endian targets, it is the position of the LSB. For
277 big-endian targets, it is the position of the MSB. */
278 LONGEST bitpos = 0;
279
280 /* The number of references to this value. When a value is created,
281 the value chain holds a reference, so REFERENCE_COUNT is 1. If
282 release_value is called, this value is removed from the chain but
283 the caller of release_value now has a reference to this value.
284 The caller must arrange for a matching value_decref call later. */
285 int reference_count = 1;
286
287 /* Only used for bitfields; the containing value. This allows a
288 single read from the target when displaying multiple
289 bitfields. */
290 value_ref_ptr parent;
291
292 /* Type of the value. */
293 struct type *type;
294
295 /* If a value represents a C++ object, then the `type' field gives
296 the object's compile-time type. If the object actually belongs
297 to some class derived from `type', perhaps with other base
298 classes and additional members, then `type' is just a subobject
299 of the real thing, and the full object is probably larger than
300 `type' would suggest.
301
302 If `type' is a dynamic class (i.e. one with a vtable), then GDB
303 can actually determine the object's run-time type by looking at
304 the run-time type information in the vtable. When this
305 information is available, we may elect to read in the entire
306 object, for several reasons:
307
308 - When printing the value, the user would probably rather see the
309 full object, not just the limited portion apparent from the
310 compile-time type.
311
312 - If `type' has virtual base classes, then even printing `type'
313 alone may require reaching outside the `type' portion of the
314 object to wherever the virtual base class has been stored.
315
316 When we store the entire object, `enclosing_type' is the run-time
317 type -- the complete object -- and `embedded_offset' is the
318 offset of `type' within that larger type, in target addressable memory
319 units. The value_contents() function takes `embedded_offset' into account,
320 so most GDB code continues to see the `type' portion of the value, just
321 as the inferior would.
322
323 If `type' is a pointer to an object, then `enclosing_type' is a
324 pointer to the object's run-time type, and `pointed_to_offset' is
325 the offset in target addressable memory units from the full object
326 to the pointed-to object -- that is, the value `embedded_offset' would
327 have if we followed the pointer and fetched the complete object.
328 (I don't really see the point. Why not just determine the
329 run-time type when you indirect, and avoid the special case? The
330 contents don't matter until you indirect anyway.)
331
332 If we're not doing anything fancy, `enclosing_type' is equal to
333 `type', and `embedded_offset' is zero, so everything works
334 normally. */
335 struct type *enclosing_type;
336 LONGEST embedded_offset = 0;
337 LONGEST pointed_to_offset = 0;
338
339 /* Actual contents of the value. Target byte-order. NULL or not
340 valid if lazy is nonzero. */
341 gdb::unique_xmalloc_ptr<gdb_byte> contents;
342
343 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
344 rather than available, since the common and default case is for a
345 value to be available. This is filled in at value read time.
346 The unavailable ranges are tracked in bits. Note that a contents
347 bit that has been optimized out doesn't really exist in the
348 program, so it can't be marked unavailable either. */
349 std::vector<range> unavailable;
350
351 /* Likewise, but for optimized out contents (a chunk of the value of
352 a variable that does not actually exist in the program). If LVAL
353 is lval_register, this is a register ($pc, $sp, etc., never a
354 program variable) that has not been saved in the frame.
355 Not-saved registers and optimized-out program variables are
356 treated pretty much the same, except that not-saved registers
357 have a different string representation and related error strings. */
358 std::vector<range> optimized_out;
359 };
360
361 /* See value.h. */
362
363 struct gdbarch *
364 get_value_arch (const struct value *value)
365 {
366 return get_type_arch (value_type (value));
367 }
368
369 int
370 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
371 {
372 gdb_assert (!value->lazy);
373
374 return !ranges_contain (value->unavailable, offset, length);
375 }
376
377 int
378 value_bytes_available (const struct value *value,
379 LONGEST offset, LONGEST length)
380 {
381 return value_bits_available (value,
382 offset * TARGET_CHAR_BIT,
383 length * TARGET_CHAR_BIT);
384 }
385
386 int
387 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
388 {
389 gdb_assert (!value->lazy);
390
391 return ranges_contain (value->optimized_out, bit_offset, bit_length);
392 }
393
394 int
395 value_entirely_available (struct value *value)
396 {
397 /* We can only tell whether the whole value is available when we try
398 to read it. */
399 if (value->lazy)
400 value_fetch_lazy (value);
401
402 if (value->unavailable.empty ())
403 return 1;
404 return 0;
405 }
406
407 /* Returns true if VALUE is entirely covered by RANGES. If the value
408 is lazy, it'll be read now. Note that RANGES may be updated as a
409 side effect of reading the value. */
410
411 static int
412 value_entirely_covered_by_range_vector (struct value *value,
413 const std::vector<range> &ranges)
414 {
415 /* We can only tell whether the whole value is optimized out /
416 unavailable when we try to read it. */
417 if (value->lazy)
418 value_fetch_lazy (value);
419
420 if (ranges.size () == 1)
421 {
422 const struct range &t = ranges[0];
423
424 if (t.offset == 0
425 && t.length == (TARGET_CHAR_BIT
426 * TYPE_LENGTH (value_enclosing_type (value))))
427 return 1;
428 }
429
430 return 0;
431 }
432
433 int
434 value_entirely_unavailable (struct value *value)
435 {
436 return value_entirely_covered_by_range_vector (value, value->unavailable);
437 }
438
439 int
440 value_entirely_optimized_out (struct value *value)
441 {
442 return value_entirely_covered_by_range_vector (value, value->optimized_out);
443 }
444
445 /* Insert into the vector pointed to by VECTORP the bit range starting at
446 OFFSET bits, and extending for the next LENGTH bits. */
447
448 static void
449 insert_into_bit_range_vector (std::vector<range> *vectorp,
450 LONGEST offset, LONGEST length)
451 {
452 range newr;
453
454 /* Insert the range sorted. If there's overlap or the new range
455 would be contiguous with an existing range, merge. */
456
457 newr.offset = offset;
458 newr.length = length;
459
460 /* Do a binary search for the position the given range would be
461 inserted if we only considered the starting OFFSET of ranges.
462 Call that position I. Since we also have LENGTH to care for
463 (this is a range after all), we need to check if the _previous_
464 range overlaps the I range. E.g., calling R the new range:
465
466 #1 - overlaps with previous
467
468 R
469 |-...-|
470 |---| |---| |------| ... |--|
471 0 1 2 N
472
473 I=1
474
475 In the case #1 above, the binary search would return `I=1',
476 meaning, this OFFSET should be inserted at position 1, and the
477 current position 1 should be pushed further (and become 2). But,
478 note that `0' overlaps with R, so we want to merge them.
479
480 A similar consideration needs to be taken if the new range would
481 be contiguous with the previous range:
482
483 #2 - contiguous with previous
484
485 R
486 |-...-|
487 |--| |---| |------| ... |--|
488 0 1 2 N
489
490 I=1
491
492 If there's no overlap with the previous range, as in:
493
494 #3 - not overlapping and not contiguous
495
496 R
497 |-...-|
498 |--| |---| |------| ... |--|
499 0 1 2 N
500
501 I=1
502
503 or if I is 0:
504
505 #4 - R is the range with lowest offset
506
507 R
508 |-...-|
509 |--| |---| |------| ... |--|
510 0 1 2 N
511
512 I=0
513
514 ... we just push the new range to I.
515
516 All the 4 cases above need to consider that the new range may
517 also overlap several of the ranges that follow, or that R may be
518 contiguous with the following range, and merge. E.g.,
519
520 #5 - overlapping following ranges
521
522 R
523 |------------------------|
524 |--| |---| |------| ... |--|
525 0 1 2 N
526
527 I=0
528
529 or:
530
531 R
532 |-------|
533 |--| |---| |------| ... |--|
534 0 1 2 N
535
536 I=1
537
538 */
539
540 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
541 if (i > vectorp->begin ())
542 {
543 struct range &bef = *(i - 1);
544
545 if (ranges_overlap (bef.offset, bef.length, offset, length))
546 {
547 /* #1 */
548 ULONGEST l = std::min (bef.offset, offset);
549 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
550
551 bef.offset = l;
552 bef.length = h - l;
553 i--;
554 }
555 else if (offset == bef.offset + bef.length)
556 {
557 /* #2 */
558 bef.length += length;
559 i--;
560 }
561 else
562 {
563 /* #3 */
564 i = vectorp->insert (i, newr);
565 }
566 }
567 else
568 {
569 /* #4 */
570 i = vectorp->insert (i, newr);
571 }
572
573 /* Check whether the ranges following the one we've just added or
574 touched can be folded in (#5 above). */
575 if (i != vectorp->end () && i + 1 < vectorp->end ())
576 {
577 int removed = 0;
578 auto next = i + 1;
579
580 /* Get the range we just touched. */
581 struct range &t = *i;
582 removed = 0;
583
584 i = next;
585 for (; i < vectorp->end (); i++)
586 {
587 struct range &r = *i;
588 if (r.offset <= t.offset + t.length)
589 {
590 ULONGEST l, h;
591
592 l = std::min (t.offset, r.offset);
593 h = std::max (t.offset + t.length, r.offset + r.length);
594
595 t.offset = l;
596 t.length = h - l;
597
598 removed++;
599 }
600 else
601 {
602 /* If we couldn't merge this one, we won't be able to
603 merge following ones either, since the ranges are
604 always sorted by OFFSET. */
605 break;
606 }
607 }
608
609 if (removed != 0)
610 vectorp->erase (next, next + removed);
611 }
612 }
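
/* Illustrative example only: how the insertion/coalescing above plays
   out on a small vector. Nothing in GDB calls this function; its name
   is made up for the sketch. */

static void
example_insert_into_bit_range_vector (void)
{
  std::vector<range> v;

  /* Two disjoint ranges stay separate (cases #3/#4 above). */
  insert_into_bit_range_vector (&v, 0, 8);   /* [0, 8) */
  insert_into_bit_range_vector (&v, 16, 8);  /* [16, 24) */
  gdb_assert (v.size () == 2);

  /* [8, 16) is contiguous with the first range (case #2) and then
     swallows the following range (case #5), leaving the single
     coalesced range [0, 24). */
  insert_into_bit_range_vector (&v, 8, 8);
  gdb_assert (v.size () == 1);
  gdb_assert (v[0].offset == 0 && v[0].length == 24);
}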
613
614 void
615 mark_value_bits_unavailable (struct value *value,
616 LONGEST offset, LONGEST length)
617 {
618 insert_into_bit_range_vector (&value->unavailable, offset, length);
619 }
620
621 void
622 mark_value_bytes_unavailable (struct value *value,
623 LONGEST offset, LONGEST length)
624 {
625 mark_value_bits_unavailable (value,
626 offset * TARGET_CHAR_BIT,
627 length * TARGET_CHAR_BIT);
628 }
629
630 /* Find the first range in RANGES that overlaps the range defined by
631 OFFSET and LENGTH, starting at element POS in the RANGES vector.
632 Return the index into RANGES where such an overlapping range was
633 found, or -1 if none was found. */
634
635 static int
636 find_first_range_overlap (const std::vector<range> *ranges, int pos,
637 LONGEST offset, LONGEST length)
638 {
639 int i;
640
641 for (i = pos; i < ranges->size (); i++)
642 {
643 const range &r = (*ranges)[i];
644 if (ranges_overlap (r.offset, r.length, offset, length))
645 return i;
646 }
647
648 return -1;
649 }
650
651 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
652 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
653 return non-zero.
654
655 It must always be the case that:
656 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
657
658 It is assumed that memory can be accessed from:
659 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
660 to:
661 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
662 / TARGET_CHAR_BIT) */
663 static int
664 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
665 const gdb_byte *ptr2, size_t offset2_bits,
666 size_t length_bits)
667 {
668 gdb_assert (offset1_bits % TARGET_CHAR_BIT
669 == offset2_bits % TARGET_CHAR_BIT);
670
671 if (offset1_bits % TARGET_CHAR_BIT != 0)
672 {
673 size_t bits;
674 gdb_byte mask, b1, b2;
675
676 /* The offset from the base pointers PTR1 and PTR2 is not a complete
677 number of bytes. A number of bits up to either the next exact
678 byte boundary, or LENGTH_BITS (whichever is sooner) will be
679 compared. */
680 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
681 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
682 mask = (1 << bits) - 1;
683
684 if (length_bits < bits)
685 {
686 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
687 bits = length_bits;
688 }
689
690 /* Now load the two bytes and mask off the bits we care about. */
691 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
692 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
693
694 if (b1 != b2)
695 return 1;
696
697 /* Now update the length and offsets to take account of the bits
698 we've just compared. */
699 length_bits -= bits;
700 offset1_bits += bits;
701 offset2_bits += bits;
702 }
703
704 if (length_bits % TARGET_CHAR_BIT != 0)
705 {
706 size_t bits;
707 size_t o1, o2;
708 gdb_byte mask, b1, b2;
709
710 /* The length is not an exact number of bytes. After the previous
711 IF.. block, the offsets are byte aligned, or else the
712 length is zero (in which case this code is not reached). Compare
713 a number of bits at the end of the region, starting from an exact
714 byte boundary. */
715 bits = length_bits % TARGET_CHAR_BIT;
716 o1 = offset1_bits + length_bits - bits;
717 o2 = offset2_bits + length_bits - bits;
718
719 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
720 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
721
722 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
723 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
724
725 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
726 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
727
728 if (b1 != b2)
729 return 1;
730
731 length_bits -= bits;
732 }
733
734 if (length_bits > 0)
735 {
736 /* We've now taken care of any stray "bits" at the start or end of
737 the region to compare; the remainder can be covered with a simple
738 memcmp. */
739 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
740 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
741 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
742
743 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
744 ptr2 + offset2_bits / TARGET_CHAR_BIT,
745 length_bits / TARGET_CHAR_BIT);
746 }
747
748 /* Length is zero, regions match. */
749 return 0;
750 }
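
/* Illustrative example only: the bit offsets taken by
   memcmp_with_bit_offsets count from the most significant bit of each
   byte, as the masks above imply. Nothing in GDB calls this function;
   its name and the sample bytes are made up. */

static void
example_memcmp_with_bit_offsets (void)
{
  /* The buffers differ only in the least significant bit of byte 1,
     i.e. in bit 15 of the 16-bit stream. */
  const gdb_byte buf1[] = { 0x12, 0x34 };
  const gdb_byte buf2[] = { 0x12, 0x35 };

  /* The first byte matches. */
  gdb_assert (memcmp_with_bit_offsets (buf1, 0, buf2, 0, 8) == 0);

  /* Comparing all 16 bits reaches the differing bit. */
  gdb_assert (memcmp_with_bit_offsets (buf1, 0, buf2, 0, 16) != 0);

  /* Comparing only the 7 most significant bits of byte 1 stops just
     short of the differing bit. */
  gdb_assert (memcmp_with_bit_offsets (buf1, 8, buf2, 8, 7) == 0);
}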
751
752 /* Helper struct for find_first_range_overlap_and_match and
753 value_contents_bits_eq. Keeps track of which slot of a given
754 ranges vector we last looked at. */
755
756 struct ranges_and_idx
757 {
758 /* The ranges. */
759 const std::vector<range> *ranges;
760
761 /* The range we've last found in RANGES. Given ranges are sorted,
762 we can start the next lookup here. */
763 int idx;
764 };
765
766 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
767 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
768 ranges starting at OFFSET2 bits. Return true if the ranges match
769 and fill in *L and *H with the overlapping window relative to
770 (both) OFFSET1 or OFFSET2. */
771
772 static int
773 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
774 struct ranges_and_idx *rp2,
775 LONGEST offset1, LONGEST offset2,
776 LONGEST length, ULONGEST *l, ULONGEST *h)
777 {
778 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
779 offset1, length);
780 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
781 offset2, length);
782
783 if (rp1->idx == -1 && rp2->idx == -1)
784 {
785 *l = length;
786 *h = length;
787 return 1;
788 }
789 else if (rp1->idx == -1 || rp2->idx == -1)
790 return 0;
791 else
792 {
793 const range *r1, *r2;
794 ULONGEST l1, h1;
795 ULONGEST l2, h2;
796
797 r1 = &(*rp1->ranges)[rp1->idx];
798 r2 = &(*rp2->ranges)[rp2->idx];
799
800 /* Get the unavailable windows intersected by the incoming
801 ranges. The first and last ranges that overlap the argument
802 range may be wider than the incoming argument range. */
803 l1 = std::max (offset1, r1->offset);
804 h1 = std::min (offset1 + length, r1->offset + r1->length);
805
806 l2 = std::max (offset2, r2->offset);
807 h2 = std::min (offset2 + length, r2->offset + r2->length);
808
809 /* Make them relative to the respective start offsets, so we can
810 compare them for equality. */
811 l1 -= offset1;
812 h1 -= offset1;
813
814 l2 -= offset2;
815 h2 -= offset2;
816
817 /* Different ranges, no match. */
818 if (l1 != l2 || h1 != h2)
819 return 0;
820
821 *h = h1;
822 *l = l1;
823 return 1;
824 }
825 }
826
827 /* Helper function for value_contents_eq. The only difference is that
828 this function is bit rather than byte based.
829
830 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
831 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
832 Return true if the available bits match. */
833
834 static bool
835 value_contents_bits_eq (const struct value *val1, int offset1,
836 const struct value *val2, int offset2,
837 int length)
838 {
839 /* Each array element corresponds to a ranges source (unavailable,
840 optimized out). '1' is for VAL1, '2' for VAL2. */
841 struct ranges_and_idx rp1[2], rp2[2];
842
843 /* See function description in value.h. */
844 gdb_assert (!val1->lazy && !val2->lazy);
845
846 /* We shouldn't be trying to compare past the end of the values. */
847 gdb_assert (offset1 + length
848 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
849 gdb_assert (offset2 + length
850 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
851
852 memset (&rp1, 0, sizeof (rp1));
853 memset (&rp2, 0, sizeof (rp2));
854 rp1[0].ranges = &val1->unavailable;
855 rp2[0].ranges = &val2->unavailable;
856 rp1[1].ranges = &val1->optimized_out;
857 rp2[1].ranges = &val2->optimized_out;
858
859 while (length > 0)
860 {
861 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
862 int i;
863
864 for (i = 0; i < 2; i++)
865 {
866 ULONGEST l_tmp, h_tmp;
867
868 /* The contents only match if the invalid/unavailable
869 contents ranges match as well. */
870 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
871 offset1, offset2, length,
872 &l_tmp, &h_tmp))
873 return false;
874
875 /* We're interested in the lowest/first range found. */
876 if (i == 0 || l_tmp < l)
877 {
878 l = l_tmp;
879 h = h_tmp;
880 }
881 }
882
883 /* Compare the available/valid contents. */
884 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
885 val2->contents.get (), offset2, l) != 0)
886 return false;
887
888 length -= h;
889 offset1 += h;
890 offset2 += h;
891 }
892
893 return true;
894 }
895
896 bool
897 value_contents_eq (const struct value *val1, LONGEST offset1,
898 const struct value *val2, LONGEST offset2,
899 LONGEST length)
900 {
901 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
902 val2, offset2 * TARGET_CHAR_BIT,
903 length * TARGET_CHAR_BIT);
904 }
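
/* Illustrative example only: comparing the contents of two simple,
   fully-available values. Nothing in GDB calls this function; its
   name is made up, and it assumes target_gdbarch () is usable at the
   point it would run. */

static void
example_value_contents_eq (void)
{
  struct type *int_type = builtin_type (target_gdbarch ())->builtin_int;

  /* Three non-lazy, not_lval values with known contents. */
  struct value *a = value_from_longest (int_type, 7);
  struct value *b = value_from_longest (int_type, 7);
  struct value *c = value_from_longest (int_type, 8);

  /* Equal contents, and none of these values has unavailable or
     optimized-out ranges, so the whole length compares equal. */
  gdb_assert (value_contents_eq (a, 0, b, 0, TYPE_LENGTH (int_type)));
  gdb_assert (!value_contents_eq (a, 0, c, 0, TYPE_LENGTH (int_type)));
}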
905
906
907 /* The value-history records all the values printed by print commands
908 during this session. */
909
910 static std::vector<value_ref_ptr> value_history;
911
912 \f
913 /* List of all value objects currently allocated
914 (except for those released by calls to release_value).
915 This is so they can be freed after each command. */
916
917 static std::vector<value_ref_ptr> all_values;
918
919 /* Allocate a lazy value for type TYPE. Its actual content is
920 "lazily" allocated too: the content field of the return value is
921 NULL; it will be allocated when it is fetched from the target. */
922
923 struct value *
924 allocate_value_lazy (struct type *type)
925 {
926 struct value *val;
927
928 /* Call check_typedef on our type to make sure that, if TYPE
929 is a TYPE_CODE_TYPEDEF, its length is set to the length
930 of the target type instead of zero. However, we do not
931 replace the typedef type by the target type, because we want
932 to keep the typedef in order to be able to set the VAL's type
933 description correctly. */
934 check_typedef (type);
935
936 val = new struct value (type);
937
938 /* Values start out on the all_values chain. */
939 all_values.emplace_back (val);
940
941 return val;
942 }
943
944 /* The maximum size, in bytes, that GDB will try to allocate for a value.
945 The initial value of 64k was not selected for any specific reason; it is
946 just a reasonable starting point. */
947
948 static int max_value_size = 65536; /* 64k bytes */
949
950 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
951 LONGEST, otherwise GDB will not be able to parse integer values from the
952 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
953 be unable to parse "set max-value-size 2".
954
955 As we want a consistent GDB experience across hosts with different sizes
956 of LONGEST, this arbitrary minimum value was selected; as long as it
957 is bigger than LONGEST on all GDB-supported hosts, we're fine.
958
959 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
960 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
961
962 /* Implement the "set max-value-size" command. */
963
964 static void
965 set_max_value_size (const char *args, int from_tty,
966 struct cmd_list_element *c)
967 {
968 gdb_assert (max_value_size == -1 || max_value_size >= 0);
969
970 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
971 {
972 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
973 error (_("max-value-size set too low, increasing to %d bytes"),
974 max_value_size);
975 }
976 }
977
978 /* Implement the "show max-value-size" command. */
979
980 static void
981 show_max_value_size (struct ui_file *file, int from_tty,
982 struct cmd_list_element *c, const char *value)
983 {
984 if (max_value_size == -1)
985 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
986 else
987 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
988 max_value_size);
989 }
990
991 /* Called before we attempt to allocate or reallocate a buffer for the
992 contents of a value. TYPE is the type of the value for which we are
993 allocating the buffer. If the buffer is too large (based on the user
994 controllable setting) then throw an error. If this function returns
995 then we should attempt to allocate the buffer. */
996
997 static void
998 check_type_length_before_alloc (const struct type *type)
999 {
1000 unsigned int length = TYPE_LENGTH (type);
1001
1002 if (max_value_size > -1 && length > max_value_size)
1003 {
1004 if (TYPE_NAME (type) != NULL)
1005 error (_("value of type `%s' requires %u bytes, which is more "
1006 "than max-value-size"), TYPE_NAME (type), length);
1007 else
1008 error (_("value requires %u bytes, which is more than "
1009 "max-value-size"), length);
1010 }
1011 }
1012
1013 /* Allocate the contents of VAL if it has not been allocated yet. */
1014
1015 static void
1016 allocate_value_contents (struct value *val)
1017 {
1018 if (!val->contents)
1019 {
1020 check_type_length_before_alloc (val->enclosing_type);
1021 val->contents.reset
1022 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1023 }
1024 }
1025
1026 /* Allocate a value and its contents for type TYPE. */
1027
1028 struct value *
1029 allocate_value (struct type *type)
1030 {
1031 struct value *val = allocate_value_lazy (type);
1032
1033 allocate_value_contents (val);
1034 val->lazy = 0;
1035 return val;
1036 }
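
/* Illustrative example only: allocating a non-lazy value and filling
   in its contents by hand. Nothing in GDB calls this function; its
   name is made up, and it assumes target_gdbarch () is usable here. */

static void
example_allocate_value (void)
{
  struct type *t = builtin_type (target_gdbarch ())->builtin_uint32;
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (t));

  /* allocate_value returns a non-lazy value whose buffer has already
     been allocated and zero-filled. */
  struct value *v = allocate_value (t);
  gdb_assert (!value_lazy (v));

  /* Write 42 into the buffer in target byte order and read it back
     through the normal accessor. */
  store_unsigned_integer (value_contents_writeable (v), TYPE_LENGTH (t),
                          byte_order, 42);
  gdb_assert (value_as_long (v) == 42);
}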
1037
1038 /* Allocate a value that has the correct length
1039 for COUNT repetitions of type TYPE. */
1040
1041 struct value *
1042 allocate_repeat_value (struct type *type, int count)
1043 {
1044 int low_bound = current_language->string_lower_bound; /* ??? */
1045 /* FIXME-type-allocation: need a way to free this type when we are
1046 done with it. */
1047 struct type *array_type
1048 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1049
1050 return allocate_value (array_type);
1051 }
1052
1053 struct value *
1054 allocate_computed_value (struct type *type,
1055 const struct lval_funcs *funcs,
1056 void *closure)
1057 {
1058 struct value *v = allocate_value_lazy (type);
1059
1060 VALUE_LVAL (v) = lval_computed;
1061 v->location.computed.funcs = funcs;
1062 v->location.computed.closure = closure;
1063
1064 return v;
1065 }
1066
1067 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1068
1069 struct value *
1070 allocate_optimized_out_value (struct type *type)
1071 {
1072 struct value *retval = allocate_value_lazy (type);
1073
1074 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1075 set_value_lazy (retval, 0);
1076 return retval;
1077 }
1078
1079 /* Accessor methods. */
1080
1081 struct type *
1082 value_type (const struct value *value)
1083 {
1084 return value->type;
1085 }
1086 void
1087 deprecated_set_value_type (struct value *value, struct type *type)
1088 {
1089 value->type = type;
1090 }
1091
1092 LONGEST
1093 value_offset (const struct value *value)
1094 {
1095 return value->offset;
1096 }
1097 void
1098 set_value_offset (struct value *value, LONGEST offset)
1099 {
1100 value->offset = offset;
1101 }
1102
1103 LONGEST
1104 value_bitpos (const struct value *value)
1105 {
1106 return value->bitpos;
1107 }
1108 void
1109 set_value_bitpos (struct value *value, LONGEST bit)
1110 {
1111 value->bitpos = bit;
1112 }
1113
1114 LONGEST
1115 value_bitsize (const struct value *value)
1116 {
1117 return value->bitsize;
1118 }
1119 void
1120 set_value_bitsize (struct value *value, LONGEST bit)
1121 {
1122 value->bitsize = bit;
1123 }
1124
1125 struct value *
1126 value_parent (const struct value *value)
1127 {
1128 return value->parent.get ();
1129 }
1130
1131 /* See value.h. */
1132
1133 void
1134 set_value_parent (struct value *value, struct value *parent)
1135 {
1136 value->parent = value_ref_ptr::new_reference (parent);
1137 }
1138
1139 gdb_byte *
1140 value_contents_raw (struct value *value)
1141 {
1142 struct gdbarch *arch = get_value_arch (value);
1143 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1144
1145 allocate_value_contents (value);
1146 return value->contents.get () + value->embedded_offset * unit_size;
1147 }
1148
1149 gdb_byte *
1150 value_contents_all_raw (struct value *value)
1151 {
1152 allocate_value_contents (value);
1153 return value->contents.get ();
1154 }
1155
1156 struct type *
1157 value_enclosing_type (const struct value *value)
1158 {
1159 return value->enclosing_type;
1160 }
1161
1162 /* Look at value.h for description. */
1163
1164 struct type *
1165 value_actual_type (struct value *value, int resolve_simple_types,
1166 int *real_type_found)
1167 {
1168 struct value_print_options opts;
1169 struct type *result;
1170
1171 get_user_print_options (&opts);
1172
1173 if (real_type_found)
1174 *real_type_found = 0;
1175 result = value_type (value);
1176 if (opts.objectprint)
1177 {
1178 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1179 fetch its rtti type. */
1180 if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1181 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1182 == TYPE_CODE_STRUCT
1183 && !value_optimized_out (value))
1184 {
1185 struct type *real_type;
1186
1187 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1188 if (real_type)
1189 {
1190 if (real_type_found)
1191 *real_type_found = 1;
1192 result = real_type;
1193 }
1194 }
1195 else if (resolve_simple_types)
1196 {
1197 if (real_type_found)
1198 *real_type_found = 1;
1199 result = value_enclosing_type (value);
1200 }
1201 }
1202
1203 return result;
1204 }
1205
1206 void
1207 error_value_optimized_out (void)
1208 {
1209 error (_("value has been optimized out"));
1210 }
1211
1212 static void
1213 require_not_optimized_out (const struct value *value)
1214 {
1215 if (!value->optimized_out.empty ())
1216 {
1217 if (value->lval == lval_register)
1218 error (_("register has not been saved in frame"));
1219 else
1220 error_value_optimized_out ();
1221 }
1222 }
1223
1224 static void
1225 require_available (const struct value *value)
1226 {
1227 if (!value->unavailable.empty ())
1228 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1229 }
1230
1231 const gdb_byte *
1232 value_contents_for_printing (struct value *value)
1233 {
1234 if (value->lazy)
1235 value_fetch_lazy (value);
1236 return value->contents.get ();
1237 }
1238
1239 const gdb_byte *
1240 value_contents_for_printing_const (const struct value *value)
1241 {
1242 gdb_assert (!value->lazy);
1243 return value->contents.get ();
1244 }
1245
1246 const gdb_byte *
1247 value_contents_all (struct value *value)
1248 {
1249 const gdb_byte *result = value_contents_for_printing (value);
1250 require_not_optimized_out (value);
1251 require_available (value);
1252 return result;
1253 }
1254
1255 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1256 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1257
1258 static void
1259 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1260 const std::vector<range> &src_range, int src_bit_offset,
1261 int bit_length)
1262 {
1263 for (const range &r : src_range)
1264 {
1265 ULONGEST h, l;
1266
1267 l = std::max (r.offset, (LONGEST) src_bit_offset);
1268 h = std::min (r.offset + r.length,
1269 (LONGEST) src_bit_offset + bit_length);
1270
1271 if (l < h)
1272 insert_into_bit_range_vector (dst_range,
1273 dst_bit_offset + (l - src_bit_offset),
1274 h - l);
1275 }
1276 }
1277
1278 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1279 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1280
1281 static void
1282 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1283 const struct value *src, int src_bit_offset,
1284 int bit_length)
1285 {
1286 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1287 src->unavailable, src_bit_offset,
1288 bit_length);
1289 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1290 src->optimized_out, src_bit_offset,
1291 bit_length);
1292 }
1293
1294 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1295 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1296 contents, starting at DST_OFFSET. If unavailable contents are
1297 being copied from SRC, the corresponding DST contents are marked
1298 unavailable accordingly. Neither DST nor SRC may be lazy
1299 values.
1300
1301 It is assumed the contents of DST in the [DST_OFFSET,
1302 DST_OFFSET+LENGTH) range are wholly available. */
1303
1304 void
1305 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1306 struct value *src, LONGEST src_offset, LONGEST length)
1307 {
1308 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1309 struct gdbarch *arch = get_value_arch (src);
1310 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1311
1312 /* A lazy DST would make this copy operation useless, since as
1313 soon as DST's contents were un-lazied (by a later value_contents
1314 call, say), the contents would be overwritten. A lazy SRC would
1315 mean we'd be copying garbage. */
1316 gdb_assert (!dst->lazy && !src->lazy);
1317
1318 /* The overwritten DST range gets unavailability ORed in, not
1319 replaced. Make sure to remember to implement replacing if that
1320 ever turns out to be necessary. */
1321 gdb_assert (value_bytes_available (dst, dst_offset, length));
1322 gdb_assert (!value_bits_any_optimized_out (dst,
1323 TARGET_CHAR_BIT * dst_offset,
1324 TARGET_CHAR_BIT * length));
1325
1326 /* Copy the data. */
1327 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1328 value_contents_all_raw (src) + src_offset * unit_size,
1329 length * unit_size);
1330
1331 /* Copy the meta-data, adjusted. */
1332 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1333 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1334 bit_length = length * unit_size * HOST_CHAR_BIT;
1335
1336 value_ranges_copy_adjusted (dst, dst_bit_offset,
1337 src, src_bit_offset,
1338 bit_length);
1339 }
1340
1341 /* Copy LENGTH bytes of SRC value's (all) contents
1342 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1343 (all) contents, starting at DST_OFFSET. If unavailable contents
1344 are being copied from SRC, the corresponding DST contents are
1345 marked unavailable accordingly. DST must not be lazy. If SRC is
1346 lazy, it will be fetched now.
1347
1348 It is assumed the contents of DST in the [DST_OFFSET,
1349 DST_OFFSET+LENGTH) range are wholly available. */
1350
1351 void
1352 value_contents_copy (struct value *dst, LONGEST dst_offset,
1353 struct value *src, LONGEST src_offset, LONGEST length)
1354 {
1355 if (src->lazy)
1356 value_fetch_lazy (src);
1357
1358 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1359 }
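
/* Illustrative example only: copying the contents of one value into
   another with value_contents_copy. Nothing in GDB calls this
   function; its name is made up, and it assumes target_gdbarch () is
   usable here. */

static void
example_value_contents_copy (void)
{
  struct type *t = builtin_type (target_gdbarch ())->builtin_int32;

  struct value *src = value_from_longest (t, 0x1234);

  /* DST must be non-lazy and wholly available; a freshly allocated
     value satisfies both. */
  struct value *dst = allocate_value (t);

  value_contents_copy (dst, 0, src, 0, TYPE_LENGTH (t));
  gdb_assert (value_contents_eq (dst, 0, src, 0, TYPE_LENGTH (t)));
}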
1360
1361 int
1362 value_lazy (const struct value *value)
1363 {
1364 return value->lazy;
1365 }
1366
1367 void
1368 set_value_lazy (struct value *value, int val)
1369 {
1370 value->lazy = val;
1371 }
1372
1373 int
1374 value_stack (const struct value *value)
1375 {
1376 return value->stack;
1377 }
1378
1379 void
1380 set_value_stack (struct value *value, int val)
1381 {
1382 value->stack = val;
1383 }
1384
1385 const gdb_byte *
1386 value_contents (struct value *value)
1387 {
1388 const gdb_byte *result = value_contents_writeable (value);
1389 require_not_optimized_out (value);
1390 require_available (value);
1391 return result;
1392 }
1393
1394 gdb_byte *
1395 value_contents_writeable (struct value *value)
1396 {
1397 if (value->lazy)
1398 value_fetch_lazy (value);
1399 return value_contents_raw (value);
1400 }
1401
1402 int
1403 value_optimized_out (struct value *value)
1404 {
1405 /* We can only know if a value is optimized out once we have tried to
1406 fetch it. */
1407 if (value->optimized_out.empty () && value->lazy)
1408 {
1409 try
1410 {
1411 value_fetch_lazy (value);
1412 }
1413 catch (const gdb_exception_error &ex)
1414 {
1415 /* Fall back to checking value->optimized_out. */
1416 }
1417 }
1418
1419 return !value->optimized_out.empty ();
1420 }
1421
1422 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes
1423 and extending for the following LENGTH bytes. */
1424
1425 void
1426 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1427 {
1428 mark_value_bits_optimized_out (value,
1429 offset * TARGET_CHAR_BIT,
1430 length * TARGET_CHAR_BIT);
1431 }
1432
1433 /* See value.h. */
1434
1435 void
1436 mark_value_bits_optimized_out (struct value *value,
1437 LONGEST offset, LONGEST length)
1438 {
1439 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1440 }
1441
1442 int
1443 value_bits_synthetic_pointer (const struct value *value,
1444 LONGEST offset, LONGEST length)
1445 {
1446 if (value->lval != lval_computed
1447 || !value->location.computed.funcs->check_synthetic_pointer)
1448 return 0;
1449 return value->location.computed.funcs->check_synthetic_pointer (value,
1450 offset,
1451 length);
1452 }
1453
1454 LONGEST
1455 value_embedded_offset (const struct value *value)
1456 {
1457 return value->embedded_offset;
1458 }
1459
1460 void
1461 set_value_embedded_offset (struct value *value, LONGEST val)
1462 {
1463 value->embedded_offset = val;
1464 }
1465
1466 LONGEST
1467 value_pointed_to_offset (const struct value *value)
1468 {
1469 return value->pointed_to_offset;
1470 }
1471
1472 void
1473 set_value_pointed_to_offset (struct value *value, LONGEST val)
1474 {
1475 value->pointed_to_offset = val;
1476 }
1477
1478 const struct lval_funcs *
1479 value_computed_funcs (const struct value *v)
1480 {
1481 gdb_assert (value_lval_const (v) == lval_computed);
1482
1483 return v->location.computed.funcs;
1484 }
1485
1486 void *
1487 value_computed_closure (const struct value *v)
1488 {
1489 gdb_assert (v->lval == lval_computed);
1490
1491 return v->location.computed.closure;
1492 }
1493
1494 enum lval_type *
1495 deprecated_value_lval_hack (struct value *value)
1496 {
1497 return &value->lval;
1498 }
1499
1500 enum lval_type
1501 value_lval_const (const struct value *value)
1502 {
1503 return value->lval;
1504 }
1505
1506 CORE_ADDR
1507 value_address (const struct value *value)
1508 {
1509 if (value->lval != lval_memory)
1510 return 0;
1511 if (value->parent != NULL)
1512 return value_address (value->parent.get ()) + value->offset;
1513 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1514 {
1515 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1516 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1517 }
1518
1519 return value->location.address + value->offset;
1520 }
1521
1522 CORE_ADDR
1523 value_raw_address (const struct value *value)
1524 {
1525 if (value->lval != lval_memory)
1526 return 0;
1527 return value->location.address;
1528 }
1529
1530 void
1531 set_value_address (struct value *value, CORE_ADDR addr)
1532 {
1533 gdb_assert (value->lval == lval_memory);
1534 value->location.address = addr;
1535 }
1536
1537 struct internalvar **
1538 deprecated_value_internalvar_hack (struct value *value)
1539 {
1540 return &value->location.internalvar;
1541 }
1542
1543 struct frame_id *
1544 deprecated_value_next_frame_id_hack (struct value *value)
1545 {
1546 gdb_assert (value->lval == lval_register);
1547 return &value->location.reg.next_frame_id;
1548 }
1549
1550 int *
1551 deprecated_value_regnum_hack (struct value *value)
1552 {
1553 gdb_assert (value->lval == lval_register);
1554 return &value->location.reg.regnum;
1555 }
1556
1557 int
1558 deprecated_value_modifiable (const struct value *value)
1559 {
1560 return value->modifiable;
1561 }
1562 \f
1563 /* Return a mark in the value chain. All values allocated after the
1564 mark is obtained (except for those released) are subject to being freed
1565 if a subsequent value_free_to_mark is passed the mark. */
1566 struct value *
1567 value_mark (void)
1568 {
1569 if (all_values.empty ())
1570 return nullptr;
1571 return all_values.back ().get ();
1572 }
1573
1574 /* See value.h. */
1575
1576 void
1577 value_incref (struct value *val)
1578 {
1579 val->reference_count++;
1580 }
1581
1582 /* Release a reference to VAL, which was acquired with value_incref.
1583 This function is also called to deallocate values from the value
1584 chain. */
1585
1586 void
1587 value_decref (struct value *val)
1588 {
1589 if (val != nullptr)
1590 {
1591 gdb_assert (val->reference_count > 0);
1592 val->reference_count--;
1593 if (val->reference_count == 0)
1594 delete val;
1595 }
1596 }
1597
1598 /* Free all values allocated since MARK was obtained by value_mark
1599 (except for those released). */
1600 void
1601 value_free_to_mark (const struct value *mark)
1602 {
1603 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1604 if (iter == all_values.end ())
1605 all_values.clear ();
1606 else
1607 all_values.erase (iter + 1, all_values.end ());
1608 }
1609
1610 /* Remove VAL from the chain all_values
1611 so it will not be freed automatically. */
1612
1613 value_ref_ptr
1614 release_value (struct value *val)
1615 {
1616 if (val == nullptr)
1617 return value_ref_ptr ();
1618
1619 std::vector<value_ref_ptr>::reverse_iterator iter;
1620 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1621 {
1622 if (*iter == val)
1623 {
1624 value_ref_ptr result = *iter;
1625 all_values.erase (iter.base () - 1);
1626 return result;
1627 }
1628 }
1629
1630 /* We must always return an owned reference. Normally this happens
1631 because we transfer the reference from the value chain, but in
1632 this case the value was not on the chain. */
1633 return value_ref_ptr::new_reference (val);
1634 }
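
/* Illustrative example only: the typical mark/release/free dance
   around the all_values chain. Nothing in GDB calls this function;
   its name is made up, and it assumes target_gdbarch () is usable
   here. */

static void
example_value_chain_usage (void)
{
  struct value *mark = value_mark ();

  struct type *t = builtin_type (target_gdbarch ())->builtin_int;
  struct value *keep = allocate_value (t);
  struct value *temp = allocate_value (t);

  gdb_assert (value_type (temp) == t);

  /* Take ownership of KEEP; it is removed from the all_values chain
     and stays alive for as long as KEEP_REF does. */
  value_ref_ptr keep_ref = release_value (keep);

  /* Free every value allocated since MARK that was not released,
     i.e. TEMP but not KEEP. */
  value_free_to_mark (mark);

  /* KEEP is still usable here through KEEP_REF. */
  gdb_assert (value_type (keep_ref.get ()) == t);
}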
1635
1636 /* See value.h. */
1637
1638 std::vector<value_ref_ptr>
1639 value_release_to_mark (const struct value *mark)
1640 {
1641 std::vector<value_ref_ptr> result;
1642
1643 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1644 if (iter == all_values.end ())
1645 std::swap (result, all_values);
1646 else
1647 {
1648 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1649 all_values.erase (iter + 1, all_values.end ());
1650 }
1651 std::reverse (result.begin (), result.end ());
1652 return result;
1653 }
1654
1655 /* Return a copy of the value ARG.
1656 It contains the same contents, for the same memory address,
1657 but it's a different block of storage. */
1658
1659 struct value *
1660 value_copy (struct value *arg)
1661 {
1662 struct type *encl_type = value_enclosing_type (arg);
1663 struct value *val;
1664
1665 if (value_lazy (arg))
1666 val = allocate_value_lazy (encl_type);
1667 else
1668 val = allocate_value (encl_type);
1669 val->type = arg->type;
1670 VALUE_LVAL (val) = VALUE_LVAL (arg);
1671 val->location = arg->location;
1672 val->offset = arg->offset;
1673 val->bitpos = arg->bitpos;
1674 val->bitsize = arg->bitsize;
1675 val->lazy = arg->lazy;
1676 val->embedded_offset = value_embedded_offset (arg);
1677 val->pointed_to_offset = arg->pointed_to_offset;
1678 val->modifiable = arg->modifiable;
1679 if (!value_lazy (val))
1680 {
1681 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1682 TYPE_LENGTH (value_enclosing_type (arg)));
1683
1684 }
1685 val->unavailable = arg->unavailable;
1686 val->optimized_out = arg->optimized_out;
1687 val->parent = arg->parent;
1688 if (VALUE_LVAL (val) == lval_computed)
1689 {
1690 const struct lval_funcs *funcs = val->location.computed.funcs;
1691
1692 if (funcs->copy_closure)
1693 val->location.computed.closure = funcs->copy_closure (val);
1694 }
1695 return val;
1696 }
1697
1698 /* Return a "const" and/or "volatile" qualified version of the value V.
1699 If CNST is true, then the returned value will be qualified with
1700 "const".
1701 If VOLTL is true, then the returned value will be qualified with
1702 "volatile". */
1703
1704 struct value *
1705 make_cv_value (int cnst, int voltl, struct value *v)
1706 {
1707 struct type *val_type = value_type (v);
1708 struct type *enclosing_type = value_enclosing_type (v);
1709 struct value *cv_val = value_copy (v);
1710
1711 deprecated_set_value_type (cv_val,
1712 make_cv_type (cnst, voltl, val_type, NULL));
1713 set_value_enclosing_type (cv_val,
1714 make_cv_type (cnst, voltl, enclosing_type, NULL));
1715
1716 return cv_val;
1717 }
1718
1719 /* Return a version of ARG that is non-lvalue. */
1720
1721 struct value *
1722 value_non_lval (struct value *arg)
1723 {
1724 if (VALUE_LVAL (arg) != not_lval)
1725 {
1726 struct type *enc_type = value_enclosing_type (arg);
1727 struct value *val = allocate_value (enc_type);
1728
1729 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1730 TYPE_LENGTH (enc_type));
1731 val->type = arg->type;
1732 set_value_embedded_offset (val, value_embedded_offset (arg));
1733 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1734 return val;
1735 }
1736 return arg;
1737 }
1738
1739 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1740
1741 void
1742 value_force_lval (struct value *v, CORE_ADDR addr)
1743 {
1744 gdb_assert (VALUE_LVAL (v) == not_lval);
1745
1746 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1747 v->lval = lval_memory;
1748 v->location.address = addr;
1749 }
1750
1751 void
1752 set_value_component_location (struct value *component,
1753 const struct value *whole)
1754 {
1755 struct type *type;
1756
1757 gdb_assert (whole->lval != lval_xcallable);
1758
1759 if (whole->lval == lval_internalvar)
1760 VALUE_LVAL (component) = lval_internalvar_component;
1761 else
1762 VALUE_LVAL (component) = whole->lval;
1763
1764 component->location = whole->location;
1765 if (whole->lval == lval_computed)
1766 {
1767 const struct lval_funcs *funcs = whole->location.computed.funcs;
1768
1769 if (funcs->copy_closure)
1770 component->location.computed.closure = funcs->copy_closure (whole);
1771 }
1772
1773 /* If type has a dynamic resolved location property
1774 update its value address. */
1775 type = value_type (whole);
1776 if (NULL != TYPE_DATA_LOCATION (type)
1777 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1778 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1779 }
1780
1781 /* Access to the value history. */
1782
1783 /* Record a new value in the value history.
1784 Returns the absolute history index of the entry. */
1785
1786 int
1787 record_latest_value (struct value *val)
1788 {
1789 /* We don't want this value to have anything to do with the inferior anymore.
1790 In particular, "set $1 = 50" should not affect the variable from which
1791 the value was taken, and fast watchpoints should be able to assume that
1792 a value on the value history never changes. */
1793 if (value_lazy (val))
1794 value_fetch_lazy (val);
1795 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1796 from. This is a bit dubious, because then *&$1 does not just return $1
1797 but the current contents of that location. c'est la vie... */
1798 val->modifiable = 0;
1799
1800 value_history.push_back (release_value (val));
1801
1802 return value_history.size ();
1803 }
1804
1805 /* Return a copy of the value in the history with sequence number NUM. */
1806
1807 struct value *
1808 access_value_history (int num)
1809 {
1810 int absnum = num;
1811
1812 if (absnum <= 0)
1813 absnum += value_history.size ();
1814
1815 if (absnum <= 0)
1816 {
1817 if (num == 0)
1818 error (_("The history is empty."));
1819 else if (num == 1)
1820 error (_("There is only one value in the history."));
1821 else
1822 error (_("History does not go back to $$%d."), -num);
1823 }
1824 if (absnum > value_history.size ())
1825 error (_("History has not yet reached $%d."), absnum);
1826
1827 absnum--;
1828
1829 return value_copy (value_history[absnum].get ());
1830 }
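
/* Illustrative example only: recording a value in the history and
   fetching it back. Nothing in GDB calls this function; its name is
   made up, and it assumes target_gdbarch () is usable here. */

static void
example_value_history_usage (void)
{
  struct value *val
    = value_from_longest (builtin_type (target_gdbarch ())->builtin_int, 99);

  /* record_latest_value returns the 1-based history index, i.e. the N
     the user would type as $N. */
  int idx = record_latest_value (val);

  /* access_value_history returns a copy; 0 means the most recent
     entry, and negative numbers count back from the end. */
  struct value *latest = access_value_history (0);
  struct value *by_index = access_value_history (idx);

  gdb_assert (value_contents_eq (latest, 0, by_index, 0,
                                 TYPE_LENGTH (value_type (val))));
}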
1831
1832 static void
1833 show_values (const char *num_exp, int from_tty)
1834 {
1835 int i;
1836 struct value *val;
1837 static int num = 1;
1838
1839 if (num_exp)
1840 {
1841 /* "show values +" should print from the stored position.
1842 "show values <exp>" should print around value number <exp>. */
1843 if (num_exp[0] != '+' || num_exp[1] != '\0')
1844 num = parse_and_eval_long (num_exp) - 5;
1845 }
1846 else
1847 {
1848 /* "show values" means print the last 10 values. */
1849 num = value_history.size () - 9;
1850 }
1851
1852 if (num <= 0)
1853 num = 1;
1854
1855 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1856 {
1857 struct value_print_options opts;
1858
1859 val = access_value_history (i);
1860 printf_filtered (("$%d = "), i);
1861 get_user_print_options (&opts);
1862 value_print (val, gdb_stdout, &opts);
1863 printf_filtered (("\n"));
1864 }
1865
1866 /* The next "show values +" should start after what we just printed. */
1867 num += 10;
1868
1869 /* Hitting just return after this command should do the same thing as
1870 "show values +". If num_exp is null, this is unnecessary, since
1871 "show values +" is not useful after "show values". */
1872 if (from_tty && num_exp)
1873 set_repeat_arguments ("+");
1874 }
1875 \f
1876 enum internalvar_kind
1877 {
1878 /* The internal variable is empty. */
1879 INTERNALVAR_VOID,
1880
1881 /* The value of the internal variable is provided directly as
1882 a GDB value object. */
1883 INTERNALVAR_VALUE,
1884
1885 /* A fresh value is computed via a call-back routine on every
1886 access to the internal variable. */
1887 INTERNALVAR_MAKE_VALUE,
1888
1889 /* The internal variable holds a GDB internal convenience function. */
1890 INTERNALVAR_FUNCTION,
1891
1892 /* The variable holds an integer value. */
1893 INTERNALVAR_INTEGER,
1894
1895 /* The variable holds a GDB-provided string. */
1896 INTERNALVAR_STRING,
1897 };
1898
1899 union internalvar_data
1900 {
1901 /* A value object used with INTERNALVAR_VALUE. */
1902 struct value *value;
1903
1904 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1905 struct
1906 {
1907 /* The functions to call. */
1908 const struct internalvar_funcs *functions;
1909
1910 /* The function's user-data. */
1911 void *data;
1912 } make_value;
1913
1914 /* The internal function used with INTERNALVAR_FUNCTION. */
1915 struct
1916 {
1917 struct internal_function *function;
1918 /* True if this is the canonical name for the function. */
1919 int canonical;
1920 } fn;
1921
1922 /* An integer value used with INTERNALVAR_INTEGER. */
1923 struct
1924 {
1925 /* If type is non-NULL, it will be used as the type to generate
1926 a value for this internal variable. If type is NULL, a default
1927 integer type for the architecture is used. */
1928 struct type *type;
1929 LONGEST val;
1930 } integer;
1931
1932 /* A string value used with INTERNALVAR_STRING. */
1933 char *string;
1934 };
1935
1936 /* Internal variables. These are variables within the debugger
1937 that hold values assigned by debugger commands.
1938 The user refers to them with a '$' prefix
1939 that does not appear in the variable names stored internally. */
1940
1941 struct internalvar
1942 {
1943 struct internalvar *next;
1944 char *name;
1945
1946 /* We support various different kinds of content of an internal variable.
1947 enum internalvar_kind specifies the kind, and union internalvar_data
1948 provides the data associated with this particular kind. */
1949
1950 enum internalvar_kind kind;
1951
1952 union internalvar_data u;
1953 };
1954
1955 static struct internalvar *internalvars;
1956
1957 /* If the variable does not already exist, create it and give it the
1958 value given. If no value is given, then the default is zero. */
1959 static void
1960 init_if_undefined_command (const char* args, int from_tty)
1961 {
1962 struct internalvar* intvar;
1963
1964 /* Parse the expression - this is taken from set_command(). */
1965 expression_up expr = parse_expression (args);
1966
1967 /* Validate the expression.
1968 Was the expression an assignment?
1969 Or even an expression at all? */
1970 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1971 error (_("Init-if-undefined requires an assignment expression."));
1972
1973 /* Extract the variable from the parsed expression.
1974 In the case of an assignment the lvalue will be in elts[1] and elts[2]. */
1975 if (expr->elts[1].opcode != OP_INTERNALVAR)
1976 error (_("The first parameter to init-if-undefined "
1977 "should be a GDB variable."));
1978 intvar = expr->elts[2].internalvar;
1979
1980 /* Only evaluate the expression if the lvalue is void.
1981 This may still fail if the expression is invalid. */
1982 if (intvar->kind == INTERNALVAR_VOID)
1983 evaluate_expression (expr.get ());
1984 }
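
/* Usage sketch (editor's illustration): "init-if-undefined
   $answer = 42" assigns 42 to the convenience variable $answer only
   if $answer is still void, i.e. has never been given a value; if
   the variable is already set, the assignment expression is parsed
   but not evaluated.  */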
1985
1986
1987 /* Look up an internal variable with name NAME. NAME should not
1988 normally include a dollar sign.
1989
1990 If the specified internal variable does not exist,
1991 the return value is NULL. */
1992
1993 struct internalvar *
1994 lookup_only_internalvar (const char *name)
1995 {
1996 struct internalvar *var;
1997
1998 for (var = internalvars; var; var = var->next)
1999 if (strcmp (var->name, name) == 0)
2000 return var;
2001
2002 return NULL;
2003 }
2004
2005 /* Complete NAME by comparing it to the names of internal
2006 variables. */
2007
2008 void
2009 complete_internalvar (completion_tracker &tracker, const char *name)
2010 {
2011 struct internalvar *var;
2012 int len;
2013
2014 len = strlen (name);
2015
2016 for (var = internalvars; var; var = var->next)
2017 if (strncmp (var->name, name, len) == 0)
2018 tracker.add_completion (make_unique_xstrdup (var->name));
2019 }
2020
2021 /* Create an internal variable with name NAME and with a void value.
2022 NAME should not normally include a dollar sign. */
2023
2024 struct internalvar *
2025 create_internalvar (const char *name)
2026 {
2027 struct internalvar *var = XNEW (struct internalvar);
2028
2029 var->name = xstrdup (name);
2030 var->kind = INTERNALVAR_VOID;
2031 var->next = internalvars;
2032 internalvars = var;
2033 return var;
2034 }
2035
2036 /* Create an internal variable with name NAME and register
2037 FUNCS->make_value as the function that value_of_internalvar uses to
2038 create a value whenever this variable is referenced. NAME should not
2039 normally include a dollar sign. DATA is passed uninterpreted to the
2040 FUNCS callbacks. FUNCS->destroy, if not NULL, is called when the
2041 variable's content is cleared or replaced; it is passed DATA. */
2042
2043 struct internalvar *
2044 create_internalvar_type_lazy (const char *name,
2045 const struct internalvar_funcs *funcs,
2046 void *data)
2047 {
2048 struct internalvar *var = create_internalvar (name);
2049
2050 var->kind = INTERNALVAR_MAKE_VALUE;
2051 var->u.make_value.functions = funcs;
2052 var->u.make_value.data = data;
2053 return var;
2054 }
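
/* Sketch (editor's illustration, not part of GDB): a MAKE_VALUE
   callback compatible with the call site in value_of_internalvar
   below takes the architecture, the variable and the registered DATA
   pointer, and returns a freshly created value:

       static struct value *
       my_make_value (struct gdbarch *gdbarch, struct internalvar *var,
                      void *data)
       {
         return value_from_longest (builtin_type (gdbarch)->builtin_int, 1);
       }

   It would be installed by pointing the make_value member of a
   struct internalvar_funcs at it and passing that table to
   create_internalvar_type_lazy; the exact layout of the table is
   defined in value.h.  */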
2055
2056 /* See documentation in value.h. */
2057
2058 int
2059 compile_internalvar_to_ax (struct internalvar *var,
2060 struct agent_expr *expr,
2061 struct axs_value *value)
2062 {
2063 if (var->kind != INTERNALVAR_MAKE_VALUE
2064 || var->u.make_value.functions->compile_to_ax == NULL)
2065 return 0;
2066
2067 var->u.make_value.functions->compile_to_ax (var, expr, value,
2068 var->u.make_value.data);
2069 return 1;
2070 }
2071
2072 /* Look up an internal variable with name NAME. NAME should not
2073 normally include a dollar sign.
2074
2075 If the specified internal variable does not exist,
2076 one is created, with a void value. */
2077
2078 struct internalvar *
2079 lookup_internalvar (const char *name)
2080 {
2081 struct internalvar *var;
2082
2083 var = lookup_only_internalvar (name);
2084 if (var)
2085 return var;
2086
2087 return create_internalvar (name);
2088 }
2089
2090 /* Return current value of internal variable VAR. For variables that
2091 are not inherently typed, use a value type appropriate for GDBARCH. */
2092
2093 struct value *
2094 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2095 {
2096 struct value *val;
2097 struct trace_state_variable *tsv;
2098
2099 /* If there is a trace state variable of the same name, assume that
2100 is what we really want to see. */
2101 tsv = find_trace_state_variable (var->name);
2102 if (tsv)
2103 {
2104 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2105 &(tsv->value));
2106 if (tsv->value_known)
2107 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2108 tsv->value);
2109 else
2110 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2111 return val;
2112 }
2113
2114 switch (var->kind)
2115 {
2116 case INTERNALVAR_VOID:
2117 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2118 break;
2119
2120 case INTERNALVAR_FUNCTION:
2121 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2122 break;
2123
2124 case INTERNALVAR_INTEGER:
2125 if (!var->u.integer.type)
2126 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2127 var->u.integer.val);
2128 else
2129 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2130 break;
2131
2132 case INTERNALVAR_STRING:
2133 val = value_cstring (var->u.string, strlen (var->u.string),
2134 builtin_type (gdbarch)->builtin_char);
2135 break;
2136
2137 case INTERNALVAR_VALUE:
2138 val = value_copy (var->u.value);
2139 if (value_lazy (val))
2140 value_fetch_lazy (val);
2141 break;
2142
2143 case INTERNALVAR_MAKE_VALUE:
2144 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2145 var->u.make_value.data);
2146 break;
2147
2148 default:
2149 internal_error (__FILE__, __LINE__, _("bad kind"));
2150 }
2151
2152 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2153 on this value go back to affect the original internal variable.
2154
2155 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2156 no underlying modifiable state in the internal variable.
2157
2158 Likewise, if the variable's value is a computed lvalue, we want
2159 references to it to produce another computed lvalue, where
2160 references and assignments actually operate through the
2161 computed value's functions.
2162
2163 This means that internal variables with computed values
2164 behave a little differently from other internal variables:
2165 assignments to them don't just replace the previous value
2166 altogether. At the moment, this seems like the behavior we
2167 want. */
2168
2169 if (var->kind != INTERNALVAR_MAKE_VALUE
2170 && val->lval != lval_computed)
2171 {
2172 VALUE_LVAL (val) = lval_internalvar;
2173 VALUE_INTERNALVAR (val) = var;
2174 }
2175
2176 return val;
2177 }
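
/* Example (editor's sketch): reading a convenience variable "$foo"
   for the current architecture.  lookup_internalvar creates a
   void-valued variable on demand, so it never returns NULL:

       struct internalvar *var = lookup_internalvar ("foo");
       struct value *val = value_of_internalvar (get_current_arch (), var);

   Unless the variable is of the MAKE_VALUE kind or holds a computed
   lvalue, VAL is marked lval_internalvar, so assigning through it
   updates "$foo" itself.  */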
2178
2179 int
2180 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2181 {
2182 if (var->kind == INTERNALVAR_INTEGER)
2183 {
2184 *result = var->u.integer.val;
2185 return 1;
2186 }
2187
2188 if (var->kind == INTERNALVAR_VALUE)
2189 {
2190 struct type *type = check_typedef (value_type (var->u.value));
2191
2192 if (TYPE_CODE (type) == TYPE_CODE_INT)
2193 {
2194 *result = value_as_long (var->u.value);
2195 return 1;
2196 }
2197 }
2198
2199 return 0;
2200 }
2201
2202 static int
2203 get_internalvar_function (struct internalvar *var,
2204 struct internal_function **result)
2205 {
2206 switch (var->kind)
2207 {
2208 case INTERNALVAR_FUNCTION:
2209 *result = var->u.fn.function;
2210 return 1;
2211
2212 default:
2213 return 0;
2214 }
2215 }
2216
2217 void
2218 set_internalvar_component (struct internalvar *var,
2219 LONGEST offset, LONGEST bitpos,
2220 LONGEST bitsize, struct value *newval)
2221 {
2222 gdb_byte *addr;
2223 struct gdbarch *arch;
2224 int unit_size;
2225
2226 switch (var->kind)
2227 {
2228 case INTERNALVAR_VALUE:
2229 addr = value_contents_writeable (var->u.value);
2230 arch = get_value_arch (var->u.value);
2231 unit_size = gdbarch_addressable_memory_unit_size (arch);
2232
2233 if (bitsize)
2234 modify_field (value_type (var->u.value), addr + offset,
2235 value_as_long (newval), bitpos, bitsize);
2236 else
2237 memcpy (addr + offset * unit_size, value_contents (newval),
2238 TYPE_LENGTH (value_type (newval)));
2239 break;
2240
2241 default:
2242 /* We can never get a component of any other kind. */
2243 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2244 }
2245 }
2246
2247 void
2248 set_internalvar (struct internalvar *var, struct value *val)
2249 {
2250 enum internalvar_kind new_kind;
2251 union internalvar_data new_data = { 0 };
2252
2253 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2254 error (_("Cannot overwrite convenience function %s"), var->name);
2255
2256 /* Prepare new contents. */
2257 switch (TYPE_CODE (check_typedef (value_type (val))))
2258 {
2259 case TYPE_CODE_VOID:
2260 new_kind = INTERNALVAR_VOID;
2261 break;
2262
2263 case TYPE_CODE_INTERNAL_FUNCTION:
2264 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2265 new_kind = INTERNALVAR_FUNCTION;
2266 get_internalvar_function (VALUE_INTERNALVAR (val),
2267 &new_data.fn.function);
2268 /* Copies created here are never canonical. */
2269 break;
2270
2271 default:
2272 new_kind = INTERNALVAR_VALUE;
2273 struct value *copy = value_copy (val);
2274 copy->modifiable = 1;
2275
2276 /* Force the value to be fetched from the target now, to avoid problems
2277 later when this internalvar is referenced and the target is gone or
2278 has changed. */
2279 if (value_lazy (copy))
2280 value_fetch_lazy (copy);
2281
2282 /* Release the value from the value chain to prevent it from being
2283 deleted by free_all_values. From here on this function should not
2284 call error () until new_data is installed into the var->u to avoid
2285 leaking memory. */
2286 new_data.value = release_value (copy).release ();
2287
2288 /* Internal variables which are created from values with a dynamic
2289 location don't need the location property of the origin anymore.
2290 The resolved dynamic location is used prior to any other address
2291 when accessing the value.
2292 If we keep it, we would still refer to the origin value.
2293 Remove the location property in case it exists. */
2294 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2295
2296 break;
2297 }
2298
2299 /* Clean up old contents. */
2300 clear_internalvar (var);
2301
2302 /* Switch over. */
2303 var->kind = new_kind;
2304 var->u = new_data;
2305 /* End code which must not call error(). */
2306 }
2307
2308 void
2309 set_internalvar_integer (struct internalvar *var, LONGEST l)
2310 {
2311 /* Clean up old contents. */
2312 clear_internalvar (var);
2313
2314 var->kind = INTERNALVAR_INTEGER;
2315 var->u.integer.type = NULL;
2316 var->u.integer.val = l;
2317 }
2318
2319 void
2320 set_internalvar_string (struct internalvar *var, const char *string)
2321 {
2322 /* Clean up old contents. */
2323 clear_internalvar (var);
2324
2325 var->kind = INTERNALVAR_STRING;
2326 var->u.string = xstrdup (string);
2327 }
2328
2329 static void
2330 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2331 {
2332 /* Clean up old contents. */
2333 clear_internalvar (var);
2334
2335 var->kind = INTERNALVAR_FUNCTION;
2336 var->u.fn.function = f;
2337 var->u.fn.canonical = 1;
2338 /* Variables installed here are always the canonical version. */
2339 }
2340
2341 void
2342 clear_internalvar (struct internalvar *var)
2343 {
2344 /* Clean up old contents. */
2345 switch (var->kind)
2346 {
2347 case INTERNALVAR_VALUE:
2348 value_decref (var->u.value);
2349 break;
2350
2351 case INTERNALVAR_STRING:
2352 xfree (var->u.string);
2353 break;
2354
2355 case INTERNALVAR_MAKE_VALUE:
2356 if (var->u.make_value.functions->destroy != NULL)
2357 var->u.make_value.functions->destroy (var->u.make_value.data);
2358 break;
2359
2360 default:
2361 break;
2362 }
2363
2364 /* Reset to void kind. */
2365 var->kind = INTERNALVAR_VOID;
2366 }
2367
2368 char *
2369 internalvar_name (const struct internalvar *var)
2370 {
2371 return var->name;
2372 }
2373
2374 static struct internal_function *
2375 create_internal_function (const char *name,
2376 internal_function_fn handler, void *cookie)
2377 {
2378 struct internal_function *ifn = XNEW (struct internal_function);
2379
2380 ifn->name = xstrdup (name);
2381 ifn->handler = handler;
2382 ifn->cookie = cookie;
2383 return ifn;
2384 }
2385
2386 char *
2387 value_internal_function_name (struct value *val)
2388 {
2389 struct internal_function *ifn;
2390 int result;
2391
2392 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2393 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2394 gdb_assert (result);
2395
2396 return ifn->name;
2397 }
2398
2399 struct value *
2400 call_internal_function (struct gdbarch *gdbarch,
2401 const struct language_defn *language,
2402 struct value *func, int argc, struct value **argv)
2403 {
2404 struct internal_function *ifn;
2405 int result;
2406
2407 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2408 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2409 gdb_assert (result);
2410
2411 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2412 }
2413
2414 /* The 'function' command. This does nothing -- it is just a
2415 placeholder to let "help function NAME" work. This is also used as
2416 the implementation of the sub-command that is created when
2417 registering an internal function. */
2418 static void
2419 function_command (const char *command, int from_tty)
2420 {
2421 /* Do nothing. */
2422 }
2423
2424 /* Helper function that does the work for add_internal_function. */
2425
2426 static struct cmd_list_element *
2427 do_add_internal_function (const char *name, const char *doc,
2428 internal_function_fn handler, void *cookie)
2429 {
2430 struct internal_function *ifn;
2431 struct internalvar *var = lookup_internalvar (name);
2432
2433 ifn = create_internal_function (name, handler, cookie);
2434 set_internalvar_function (var, ifn);
2435
2436 return add_cmd (name, no_class, function_command, doc, &functionlist);
2437 }
2438
2439 /* See value.h. */
2440
2441 void
2442 add_internal_function (const char *name, const char *doc,
2443 internal_function_fn handler, void *cookie)
2444 {
2445 do_add_internal_function (name, doc, handler, cookie);
2446 }
2447
2448 /* See value.h. */
2449
2450 void
2451 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2452 gdb::unique_xmalloc_ptr<char> &&doc,
2453 internal_function_fn handler, void *cookie)
2454 {
2455 struct cmd_list_element *cmd
2456 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2457 doc.release ();
2458 cmd->doc_allocated = 1;
2459 name.release ();
2460 cmd->name_allocated = 1;
2461 }
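
/* Sketch (editor's illustration, not an existing GDB function): a
   convenience-function handler matching the way call_internal_function
   above invokes it -- (gdbarch, language, cookie, argc, argv) -- and
   its registration:

       static struct value *
       argcount_handler (struct gdbarch *gdbarch,
                         const struct language_defn *language,
                         void *cookie, int argc, struct value **argv)
       {
         return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                    argc);
       }

       add_internal_function ("argcount", _("Return its argument count."),
                              argcount_handler, NULL);

   After that, "print $argcount (1, 2, 3)" would evaluate to 3.  */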
2462
2463 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2464 prevent cycles / duplicates. */
2465
2466 void
2467 preserve_one_value (struct value *value, struct objfile *objfile,
2468 htab_t copied_types)
2469 {
2470 if (TYPE_OBJFILE (value->type) == objfile)
2471 value->type = copy_type_recursive (objfile, value->type, copied_types);
2472
2473 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2474 value->enclosing_type = copy_type_recursive (objfile,
2475 value->enclosing_type,
2476 copied_types);
2477 }
2478
2479 /* Likewise for internal variable VAR. */
2480
2481 static void
2482 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2483 htab_t copied_types)
2484 {
2485 switch (var->kind)
2486 {
2487 case INTERNALVAR_INTEGER:
2488 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2489 var->u.integer.type
2490 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2491 break;
2492
2493 case INTERNALVAR_VALUE:
2494 preserve_one_value (var->u.value, objfile, copied_types);
2495 break;
2496 }
2497 }
2498
2499 /* Update the internal variables and value history when OBJFILE is
2500 discarded; we must copy the types out of the objfile. New global types
2501 will be created for every convenience variable which currently points to
2502 this objfile's types, and the convenience variables will be adjusted to
2503 use the new global types. */
2504
2505 void
2506 preserve_values (struct objfile *objfile)
2507 {
2508 htab_t copied_types;
2509 struct internalvar *var;
2510
2511 /* Create the hash table. We allocate on the objfile's obstack, since
2512 it is soon to be deleted. */
2513 copied_types = create_copied_types_hash (objfile);
2514
2515 for (const value_ref_ptr &item : value_history)
2516 preserve_one_value (item.get (), objfile, copied_types);
2517
2518 for (var = internalvars; var; var = var->next)
2519 preserve_one_internalvar (var, objfile, copied_types);
2520
2521 preserve_ext_lang_values (objfile, copied_types);
2522
2523 htab_delete (copied_types);
2524 }
2525
2526 static void
2527 show_convenience (const char *ignore, int from_tty)
2528 {
2529 struct gdbarch *gdbarch = get_current_arch ();
2530 struct internalvar *var;
2531 int varseen = 0;
2532 struct value_print_options opts;
2533
2534 get_user_print_options (&opts);
2535 for (var = internalvars; var; var = var->next)
2536 {
2537
2538 if (!varseen)
2539 {
2540 varseen = 1;
2541 }
2542 printf_filtered (("$%s = "), var->name);
2543
2544 try
2545 {
2546 struct value *val;
2547
2548 val = value_of_internalvar (gdbarch, var);
2549 value_print (val, gdb_stdout, &opts);
2550 }
2551 catch (const gdb_exception_error &ex)
2552 {
2553 fprintf_styled (gdb_stdout, metadata_style.style (),
2554 _("<error: %s>"), ex.what ());
2555 }
2556
2557 printf_filtered (("\n"));
2558 }
2559 if (!varseen)
2560 {
2561 /* This text does not mention convenience functions on purpose.
2562 The user can't create them except via Python, and if Python support
2563 is installed this message will never be printed ($_streq will
2564 exist). */
2565 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2566 "Convenience variables have "
2567 "names starting with \"$\";\n"
2568 "use \"set\" as in \"set "
2569 "$foo = 5\" to define them.\n"));
2570 }
2571 }
2572 \f
2573
2574 /* See value.h. */
2575
2576 struct value *
2577 value_from_xmethod (xmethod_worker_up &&worker)
2578 {
2579 struct value *v;
2580
2581 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2582 v->lval = lval_xcallable;
2583 v->location.xm_worker = worker.release ();
2584 v->modifiable = 0;
2585
2586 return v;
2587 }
2588
2589 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2590
2591 struct type *
2592 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2593 {
2594 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2595 && method->lval == lval_xcallable && !argv.empty ());
2596
2597 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2598 }
2599
2600 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2601
2602 struct value *
2603 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2604 {
2605 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2606 && method->lval == lval_xcallable && !argv.empty ());
2607
2608 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2609 }
2610 \f
2611 /* Extract a value as a C number (either long or double).
2612 Knows how to convert fixed values to double, or
2613 floating values to long.
2614 Does not deallocate the value. */
2615
2616 LONGEST
2617 value_as_long (struct value *val)
2618 {
2619 /* This coerces arrays and functions, which is necessary (e.g.
2620 in disassemble_command). It also dereferences references, which
2621 I suspect is the most logical thing to do. */
2622 val = coerce_array (val);
2623 return unpack_long (value_type (val), value_contents (val));
2624 }
2625
2626 /* Extract a value as a C pointer. Does not deallocate the value.
2627 Note that val's type may not actually be a pointer; value_as_long
2628 handles all the cases. */
2629 CORE_ADDR
2630 value_as_address (struct value *val)
2631 {
2632 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2633
2634 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2635 whether we want this to be true eventually. */
2636 #if 0
2637 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2638 non-address (e.g. argument to "signal", "info break", etc.), or
2639 for pointers to char, in which the low bits *are* significant. */
2640 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2641 #else
2642
2643 /* There are several targets (IA-64, PowerPC, and others) which
2644 don't represent pointers to functions as simply the address of
2645 the function's entry point. For example, on the IA-64, a
2646 function pointer points to a two-word descriptor, generated by
2647 the linker, which contains the function's entry point, and the
2648 value the IA-64 "global pointer" register should have --- to
2649 support position-independent code. The linker generates
2650 descriptors only for those functions whose addresses are taken.
2651
2652 On such targets, it's difficult for GDB to convert an arbitrary
2653 function address into a function pointer; it has to either find
2654 an existing descriptor for that function, or call malloc and
2655 build its own. On some targets, it is impossible for GDB to
2656 build a descriptor at all: the descriptor must contain a jump
2657 instruction; data memory cannot be executed; and code memory
2658 cannot be modified.
2659
2660 Upon entry to this function, if VAL is a value of type `function'
2661 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2662 value_address (val) is the address of the function. This is what
2663 you'll get if you evaluate an expression like `main'. The call
2664 to COERCE_ARRAY below actually does all the usual unary
2665 conversions, which includes converting values of type `function'
2666 to `pointer to function'. This is the challenging conversion
2667 discussed above. Then, `unpack_long' will convert that pointer
2668 back into an address.
2669
2670 So, suppose the user types `disassemble foo' on an architecture
2671 with a strange function pointer representation, on which GDB
2672 cannot build its own descriptors, and suppose further that `foo'
2673 has no linker-built descriptor. The address->pointer conversion
2674 will signal an error and prevent the command from running, even
2675 though the next step would have been to convert the pointer
2676 directly back into the same address.
2677
2678 The following shortcut avoids this whole mess. If VAL is a
2679 function, just return its address directly. */
2680 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2681 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2682 return value_address (val);
2683
2684 val = coerce_array (val);
2685
2686 /* Some architectures (e.g. Harvard) map instruction and data
2687 addresses onto a single large unified address space. For
2688 instance: an architecture may consider a large integer in the
2689 range 0x10000000 .. 0x1000ffff to already represent a data
2690 address (hence not need a pointer-to-address conversion), while
2691 a small integer would still need the integer-to-pointer and
2692 pointer-to-address conversions. Just assume such architectures
2693 handle all integer conversions in a single function. */
2694
2695 /* JimB writes:
2696
2697 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2698 must admonish GDB hackers to make sure its behavior matches the
2699 compiler's, whenever possible.
2700
2701 In general, I think GDB should evaluate expressions the same way
2702 the compiler does. When the user copies an expression out of
2703 their source code and hands it to a `print' command, they should
2704 get the same value the compiler would have computed. Any
2705 deviation from this rule can cause major confusion and annoyance,
2706 and needs to be justified carefully. In other words, GDB doesn't
2707 really have the freedom to do these conversions in clever and
2708 useful ways.
2709
2710 AndrewC pointed out that users aren't complaining about how GDB
2711 casts integers to pointers; they are complaining that they can't
2712 take an address from a disassembly listing and give it to `x/i'.
2713 This is certainly important.
2714
2715 Adding an architecture method like integer_to_address() certainly
2716 makes it possible for GDB to "get it right" in all circumstances
2717 --- the target has complete control over how things get done, so
2718 people can Do The Right Thing for their target without breaking
2719 anyone else. The standard doesn't specify how integers get
2720 converted to pointers; usually, the ABI doesn't either, but
2721 ABI-specific code is a more reasonable place to handle it. */
2722
2723 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2724 && !TYPE_IS_REFERENCE (value_type (val))
2725 && gdbarch_integer_to_address_p (gdbarch))
2726 return gdbarch_integer_to_address (gdbarch, value_type (val),
2727 value_contents (val));
2728
2729 return unpack_long (value_type (val), value_contents (val));
2730 #endif
2731 }
2732 \f
2733 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2734 as a long, or as a double, assuming the raw data is described
2735 by type TYPE. Knows how to convert different sizes of values
2736 and can convert between fixed and floating point. We don't assume
2737 any alignment for the raw data. Return value is in host byte order.
2738
2739 If you want functions and arrays to be coerced to pointers, and
2740 references to be dereferenced, call value_as_long() instead.
2741
2742 C++: It is assumed that the front-end has taken care of
2743 all matters concerning pointers to members. A pointer
2744 to member which reaches here is considered to be equivalent
2745 to an INT (or some size). After all, it is only an offset. */
2746
2747 LONGEST
2748 unpack_long (struct type *type, const gdb_byte *valaddr)
2749 {
2750 enum bfd_endian byte_order = type_byte_order (type);
2751 enum type_code code = TYPE_CODE (type);
2752 int len = TYPE_LENGTH (type);
2753 int nosign = TYPE_UNSIGNED (type);
2754
2755 switch (code)
2756 {
2757 case TYPE_CODE_TYPEDEF:
2758 return unpack_long (check_typedef (type), valaddr);
2759 case TYPE_CODE_ENUM:
2760 case TYPE_CODE_FLAGS:
2761 case TYPE_CODE_BOOL:
2762 case TYPE_CODE_INT:
2763 case TYPE_CODE_CHAR:
2764 case TYPE_CODE_RANGE:
2765 case TYPE_CODE_MEMBERPTR:
2766 {
2767 LONGEST result;
2768 if (nosign)
2769 result = extract_unsigned_integer (valaddr, len, byte_order);
2770 else
2771 result = extract_signed_integer (valaddr, len, byte_order);
2772 if (code == TYPE_CODE_RANGE)
2773 result += TYPE_RANGE_DATA (type)->bias;
2774 return result;
2775 }
2776
2777 case TYPE_CODE_FLT:
2778 case TYPE_CODE_DECFLOAT:
2779 return target_float_to_longest (valaddr, type);
2780
2781 case TYPE_CODE_PTR:
2782 case TYPE_CODE_REF:
2783 case TYPE_CODE_RVALUE_REF:
2784 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2785 whether we want this to be true eventually. */
2786 return extract_typed_address (valaddr, type);
2787
2788 default:
2789 error (_("Value can't be converted to integer."));
2790 }
2791 }
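
/* Worked example (editor's note): for a 4-byte little-endian signed
   int whose target bytes are { 0x2a, 0x00, 0x00, 0x00 }, unpack_long
   returns 42; with the same bytes declared unsigned the result is
   identical, since the sign bit is clear.  A TYPE_CODE_RANGE type
   additionally adds its bias to the extracted integer.  */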
2792
2793 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2794 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2795 We don't assume any alignment for the raw data. Return value is in
2796 host byte order.
2797
2798 If you want functions and arrays to be coerced to pointers, and
2799 references to be dereferenced, call value_as_address() instead.
2800
2801 C++: It is assumed that the front-end has taken care of
2802 all matters concerning pointers to members. A pointer
2803 to member which reaches here is considered to be equivalent
2804 to an INT (or some size). After all, it is only an offset. */
2805
2806 CORE_ADDR
2807 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2808 {
2809 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2810 whether we want this to be true eventually. */
2811 return unpack_long (type, valaddr);
2812 }
2813
2814 bool
2815 is_floating_value (struct value *val)
2816 {
2817 struct type *type = check_typedef (value_type (val));
2818
2819 if (is_floating_type (type))
2820 {
2821 if (!target_float_is_valid (value_contents (val), type))
2822 error (_("Invalid floating value found in program."));
2823 return true;
2824 }
2825
2826 return false;
2827 }
2828
2829 \f
2830 /* Get the value of the FIELDNO'th field (which must be static) of
2831 TYPE. */
2832
2833 struct value *
2834 value_static_field (struct type *type, int fieldno)
2835 {
2836 struct value *retval;
2837
2838 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2839 {
2840 case FIELD_LOC_KIND_PHYSADDR:
2841 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2842 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2843 break;
2844 case FIELD_LOC_KIND_PHYSNAME:
2845 {
2846 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2847 /* TYPE_FIELD_NAME (type, fieldno); */
2848 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2849
2850 if (sym.symbol == NULL)
2851 {
2852 /* With some compilers, e.g. HP aCC, static data members are
2853 reported as non-debuggable symbols. */
2854 struct bound_minimal_symbol msym
2855 = lookup_minimal_symbol (phys_name, NULL, NULL);
2856 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2857
2858 if (!msym.minsym)
2859 retval = allocate_optimized_out_value (field_type);
2860 else
2861 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2862 }
2863 else
2864 retval = value_of_variable (sym.symbol, sym.block);
2865 break;
2866 }
2867 default:
2868 gdb_assert_not_reached ("unexpected field location kind");
2869 }
2870
2871 return retval;
2872 }
2873
2874 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2875 You have to be careful here, since the size of the data area for the value
2876 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2877 than the old enclosing type, you have to allocate more space for the
2878 data. */
2879
2880 void
2881 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2882 {
2883 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2884 {
2885 check_type_length_before_alloc (new_encl_type);
2886 val->contents
2887 .reset ((gdb_byte *) xrealloc (val->contents.release (),
2888 TYPE_LENGTH (new_encl_type)));
2889 }
2890
2891 val->enclosing_type = new_encl_type;
2892 }
2893
2894 /* Given a value ARG1 (offset by OFFSET bytes)
2895 of a struct or union type ARG_TYPE,
2896 extract and return the value of one of its (non-static) fields.
2897 FIELDNO says which field. */
2898
2899 struct value *
2900 value_primitive_field (struct value *arg1, LONGEST offset,
2901 int fieldno, struct type *arg_type)
2902 {
2903 struct value *v;
2904 struct type *type;
2905 struct gdbarch *arch = get_value_arch (arg1);
2906 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2907
2908 arg_type = check_typedef (arg_type);
2909 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2910
2911 /* Call check_typedef on our type to make sure that, if TYPE
2912 is a TYPE_CODE_TYPEDEF, its length is set to the length
2913 of the target type instead of zero. However, we do not
2914 replace the typedef type by the target type, because we want
2915 to keep the typedef in order to be able to print the type
2916 description correctly. */
2917 check_typedef (type);
2918
2919 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2920 {
2921 /* Handle packed fields.
2922
2923 Create a new value for the bitfield, with bitpos and bitsize
2924 set. If possible, arrange offset and bitpos so that we can
2925 do a single aligned read of the size of the containing type.
2926 Otherwise, adjust offset to the byte containing the first
2927 bit. Assume that the address, offset, and embedded offset
2928 are sufficiently aligned. */
2929
2930 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2931 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2932
2933 v = allocate_value_lazy (type);
2934 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2935 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2936 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2937 v->bitpos = bitpos % container_bitsize;
2938 else
2939 v->bitpos = bitpos % 8;
2940 v->offset = (value_embedded_offset (arg1)
2941 + offset
2942 + (bitpos - v->bitpos) / 8);
2943 set_value_parent (v, arg1);
2944 if (!value_lazy (arg1))
2945 value_fetch_lazy (v);
2946 }
2947 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2948 {
2949 /* This field is actually a base subobject, so preserve the
2950 entire object's contents for later references to virtual
2951 bases, etc. */
2952 LONGEST boffset;
2953
2954 /* Lazy register values with offsets are not supported. */
2955 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2956 value_fetch_lazy (arg1);
2957
2958 /* We special case virtual inheritance here because this
2959 requires access to the contents, which we would rather avoid
2960 for references to ordinary fields of unavailable values. */
2961 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2962 boffset = baseclass_offset (arg_type, fieldno,
2963 value_contents (arg1),
2964 value_embedded_offset (arg1),
2965 value_address (arg1),
2966 arg1);
2967 else
2968 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2969
2970 if (value_lazy (arg1))
2971 v = allocate_value_lazy (value_enclosing_type (arg1));
2972 else
2973 {
2974 v = allocate_value (value_enclosing_type (arg1));
2975 value_contents_copy_raw (v, 0, arg1, 0,
2976 TYPE_LENGTH (value_enclosing_type (arg1)));
2977 }
2978 v->type = type;
2979 v->offset = value_offset (arg1);
2980 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2981 }
2982 else if (NULL != TYPE_DATA_LOCATION (type))
2983 {
2984 /* Field is a dynamic data member. */
2985
2986 gdb_assert (0 == offset);
2987 /* We expect an already resolved data location. */
2988 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
2989 /* For dynamic data types defer memory allocation
2990 until we actually access the value. */
2991 v = allocate_value_lazy (type);
2992 }
2993 else
2994 {
2995 /* Plain old data member */
2996 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
2997 / (HOST_CHAR_BIT * unit_size));
2998
2999 /* Lazy register values with offsets are not supported. */
3000 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3001 value_fetch_lazy (arg1);
3002
3003 if (value_lazy (arg1))
3004 v = allocate_value_lazy (type);
3005 else
3006 {
3007 v = allocate_value (type);
3008 value_contents_copy_raw (v, value_embedded_offset (v),
3009 arg1, value_embedded_offset (arg1) + offset,
3010 type_length_units (type));
3011 }
3012 v->offset = (value_offset (arg1) + offset
3013 + value_embedded_offset (arg1));
3014 }
3015 set_value_component_location (v, arg1);
3016 return v;
3017 }
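
/* Worked example (editor's note) for the packed-field branch above:
   with a 32-bit container type (which fits in a LONGEST) and
   TYPE_FIELD_BITPOS of 37, the bitfield stays within its container
   (37 % 32 = 5, and 5 + bitsize <= 32 for any bitsize up to 27), so
   v->bitpos becomes 5 and the byte offset added to the value is
   (37 - 5) / 8 = 4.  */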
3018
3019 /* Given a value ARG1 of a struct or union type,
3020 extract and return the value of one of its (non-static) fields.
3021 FIELDNO says which field. */
3022
3023 struct value *
3024 value_field (struct value *arg1, int fieldno)
3025 {
3026 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3027 }
3028
3029 /* Return a non-virtual function as a value.
3030 F is the list of member functions which contains the desired method.
3031 J is an index into F which provides the desired method.
3032
3033 We only use the symbol for its address, so be happy with either a
3034 full symbol or a minimal symbol. */
3035
3036 struct value *
3037 value_fn_field (struct value **arg1p, struct fn_field *f,
3038 int j, struct type *type,
3039 LONGEST offset)
3040 {
3041 struct value *v;
3042 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3043 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3044 struct symbol *sym;
3045 struct bound_minimal_symbol msym;
3046
3047 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3048 if (sym != NULL)
3049 {
3050 memset (&msym, 0, sizeof (msym));
3051 }
3052 else
3053 {
3054 gdb_assert (sym == NULL);
3055 msym = lookup_bound_minimal_symbol (physname);
3056 if (msym.minsym == NULL)
3057 return NULL;
3058 }
3059
3060 v = allocate_value (ftype);
3061 VALUE_LVAL (v) = lval_memory;
3062 if (sym)
3063 {
3064 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3065 }
3066 else
3067 {
3068 /* The minimal symbol might point to a function descriptor;
3069 resolve it to the actual code address instead. */
3070 struct objfile *objfile = msym.objfile;
3071 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3072
3073 set_value_address (v,
3074 gdbarch_convert_from_func_ptr_addr
3075 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3076 }
3077
3078 if (arg1p)
3079 {
3080 if (type != value_type (*arg1p))
3081 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3082 value_addr (*arg1p)));
3083
3084 /* Move the `this' pointer according to the offset.
3085 VALUE_OFFSET (*arg1p) += offset; */
3086 }
3087
3088 return v;
3089 }
3090
3091 \f
3092
3093 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3094 VALADDR, and store the result in *RESULT.
3095 The bitfield starts at BITPOS bits and contains BITSIZE bits; if
3096 BITSIZE is zero, then the length is taken from FIELD_TYPE.
3097
3098 Extracting bits depends on endianness of the machine. Compute the
3099 number of least significant bits to discard. For big endian machines,
3100 we compute the total number of bits in the anonymous object, subtract
3101 off the bit count from the MSB of the object to the MSB of the
3102 bitfield, then the size of the bitfield, which leaves the LSB discard
3103 count. For little endian machines, the discard count is simply the
3104 number of bits from the LSB of the anonymous object to the LSB of the
3105 bitfield.
3106
3107 If the field is signed, we also do sign extension. */
3108
3109 static LONGEST
3110 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3111 LONGEST bitpos, LONGEST bitsize)
3112 {
3113 enum bfd_endian byte_order = type_byte_order (field_type);
3114 ULONGEST val;
3115 ULONGEST valmask;
3116 int lsbcount;
3117 LONGEST bytes_read;
3118 LONGEST read_offset;
3119
3120 /* Read the minimum number of bytes required; there may not be
3121 enough bytes to read an entire ULONGEST. */
3122 field_type = check_typedef (field_type);
3123 if (bitsize)
3124 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3125 else
3126 {
3127 bytes_read = TYPE_LENGTH (field_type);
3128 bitsize = 8 * bytes_read;
3129 }
3130
3131 read_offset = bitpos / 8;
3132
3133 val = extract_unsigned_integer (valaddr + read_offset,
3134 bytes_read, byte_order);
3135
3136 /* Extract bits. See comment above. */
3137
3138 if (byte_order == BFD_ENDIAN_BIG)
3139 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3140 else
3141 lsbcount = (bitpos % 8);
3142 val >>= lsbcount;
3143
3144 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3145 If the field is signed, and is negative, then sign extend. */
3146
3147 if (bitsize < 8 * (int) sizeof (val))
3148 {
3149 valmask = (((ULONGEST) 1) << bitsize) - 1;
3150 val &= valmask;
3151 if (!TYPE_UNSIGNED (field_type))
3152 {
3153 if (val & (valmask ^ (valmask >> 1)))
3154 {
3155 val |= ~valmask;
3156 }
3157 }
3158 }
3159
3160 return val;
3161 }
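
/* Worked example (editor's note): reading a 7-bit field at bit
   position 11 reads bytes_read = ((11 % 8) + 7 + 7) / 8 = 2 bytes
   starting at byte offset 11 / 8 = 1.  On a little-endian target the
   discard count is 11 % 8 = 3; on a big-endian target it is
   2 * 8 - 3 - 7 = 6.  The field bits are then isolated with the mask
   (1 << 7) - 1 = 0x7f and sign-extended if the field type is signed
   and bit 6 of the result is set.  */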
3162
3163 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3164 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3165 ORIGINAL_VALUE, which must not be NULL. See
3166 unpack_value_bits_as_long for more details. */
3167
3168 int
3169 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3170 LONGEST embedded_offset, int fieldno,
3171 const struct value *val, LONGEST *result)
3172 {
3173 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3174 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3175 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3176 int bit_offset;
3177
3178 gdb_assert (val != NULL);
3179
3180 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3181 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3182 || !value_bits_available (val, bit_offset, bitsize))
3183 return 0;
3184
3185 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3186 bitpos, bitsize);
3187 return 1;
3188 }
3189
3190 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3191 object at VALADDR. See unpack_bits_as_long for more details. */
3192
3193 LONGEST
3194 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3195 {
3196 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3197 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3198 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3199
3200 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3201 }
3202
3203 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3204 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3205 the contents in DEST_VAL, zero or sign extending if the type of
3206 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3207 VAL. If the parts of VAL's contents required to extract the
3208 bitfield are unavailable/optimized out, DEST_VAL is correspondingly
3209 marked unavailable/optimized out. */
3210
3211 void
3212 unpack_value_bitfield (struct value *dest_val,
3213 LONGEST bitpos, LONGEST bitsize,
3214 const gdb_byte *valaddr, LONGEST embedded_offset,
3215 const struct value *val)
3216 {
3217 enum bfd_endian byte_order;
3218 int src_bit_offset;
3219 int dst_bit_offset;
3220 struct type *field_type = value_type (dest_val);
3221
3222 byte_order = type_byte_order (field_type);
3223
3224 /* First, unpack and sign extend the bitfield as if it was wholly
3225 valid. Optimized out/unavailable bits are read as zero, but
3226 that's OK, as they'll end up marked below. If the VAL is
3227 wholly-invalid we may have skipped allocating its contents,
3228 though. See allocate_optimized_out_value. */
3229 if (valaddr != NULL)
3230 {
3231 LONGEST num;
3232
3233 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3234 bitpos, bitsize);
3235 store_signed_integer (value_contents_raw (dest_val),
3236 TYPE_LENGTH (field_type), byte_order, num);
3237 }
3238
3239 /* Now copy the optimized out / unavailability ranges to the right
3240 bits. */
3241 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3242 if (byte_order == BFD_ENDIAN_BIG)
3243 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3244 else
3245 dst_bit_offset = 0;
3246 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3247 val, src_bit_offset, bitsize);
3248 }
3249
3250 /* Return a new value with type TYPE, which is FIELDNO field of the
3251 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3252 of VAL. If the parts of VAL's contents required to extract the
3253 bitfield are unavailable/optimized out, the new value is
3254 correspondingly marked unavailable/optimized out. */
3255
3256 struct value *
3257 value_field_bitfield (struct type *type, int fieldno,
3258 const gdb_byte *valaddr,
3259 LONGEST embedded_offset, const struct value *val)
3260 {
3261 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3262 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3263 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3264
3265 unpack_value_bitfield (res_val, bitpos, bitsize,
3266 valaddr, embedded_offset, val);
3267
3268 return res_val;
3269 }
3270
3271 /* Modify the value of a bitfield. ADDR points to a block of memory in
3272 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3273 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3274 indicate which bits (in target bit order) comprise the bitfield.
3275 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3276 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3277
3278 void
3279 modify_field (struct type *type, gdb_byte *addr,
3280 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3281 {
3282 enum bfd_endian byte_order = type_byte_order (type);
3283 ULONGEST oword;
3284 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3285 LONGEST bytesize;
3286
3287 /* Normalize BITPOS. */
3288 addr += bitpos / 8;
3289 bitpos %= 8;
3290
3291 /* If a negative fieldval fits in the field in question, chop
3292 off the sign extension bits. */
3293 if ((~fieldval & ~(mask >> 1)) == 0)
3294 fieldval &= mask;
3295
3296 /* Warn if value is too big to fit in the field in question. */
3297 if (0 != (fieldval & ~mask))
3298 {
3299 /* FIXME: would like to include fieldval in the message, but
3300 we don't have a sprintf_longest. */
3301 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3302
3303 /* Truncate it, otherwise adjoining fields may be corrupted. */
3304 fieldval &= mask;
3305 }
3306
3307 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3308 false valgrind reports. */
3309
3310 bytesize = (bitpos + bitsize + 7) / 8;
3311 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3312
3313 /* Shifting for bit field depends on endianness of the target machine. */
3314 if (byte_order == BFD_ENDIAN_BIG)
3315 bitpos = bytesize * 8 - bitpos - bitsize;
3316
3317 oword &= ~(mask << bitpos);
3318 oword |= fieldval << bitpos;
3319
3320 store_unsigned_integer (addr, bytesize, byte_order, oword);
3321 }
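
/* Worked example (editor's note): writing FIELDVAL = -1 into a 3-bit
   field at BITPOS = 10.  The mask is
   (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - 3), i.e. 7; ADDR is
   advanced by 10 / 8 = 1 byte and BITPOS becomes 2; the negative
   FIELDVAL fits, so its sign-extension bits are chopped, leaving 7;
   bytesize = (2 + 3 + 7) / 8 = 1 byte is read, and on a
   little-endian target bits 2..4 of that byte are cleared and
   replaced with the field value before the byte is written back.  */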
3322 \f
3323 /* Pack NUM into BUF using a target format of TYPE. */
3324
3325 void
3326 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3327 {
3328 enum bfd_endian byte_order = type_byte_order (type);
3329 LONGEST len;
3330
3331 type = check_typedef (type);
3332 len = TYPE_LENGTH (type);
3333
3334 switch (TYPE_CODE (type))
3335 {
3336 case TYPE_CODE_RANGE:
3337 num -= TYPE_RANGE_DATA (type)->bias;
3338 /* Fall through. */
3339 case TYPE_CODE_INT:
3340 case TYPE_CODE_CHAR:
3341 case TYPE_CODE_ENUM:
3342 case TYPE_CODE_FLAGS:
3343 case TYPE_CODE_BOOL:
3344 case TYPE_CODE_MEMBERPTR:
3345 store_signed_integer (buf, len, byte_order, num);
3346 break;
3347
3348 case TYPE_CODE_REF:
3349 case TYPE_CODE_RVALUE_REF:
3350 case TYPE_CODE_PTR:
3351 store_typed_address (buf, type, (CORE_ADDR) num);
3352 break;
3353
3354 case TYPE_CODE_FLT:
3355 case TYPE_CODE_DECFLOAT:
3356 target_float_from_longest (buf, type, num);
3357 break;
3358
3359 default:
3360 error (_("Unexpected type (%d) encountered for integer constant."),
3361 TYPE_CODE (type));
3362 }
3363 }
3364
3365
3366 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3367
3368 static void
3369 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3370 {
3371 LONGEST len;
3372 enum bfd_endian byte_order;
3373
3374 type = check_typedef (type);
3375 len = TYPE_LENGTH (type);
3376 byte_order = type_byte_order (type);
3377
3378 switch (TYPE_CODE (type))
3379 {
3380 case TYPE_CODE_INT:
3381 case TYPE_CODE_CHAR:
3382 case TYPE_CODE_ENUM:
3383 case TYPE_CODE_FLAGS:
3384 case TYPE_CODE_BOOL:
3385 case TYPE_CODE_RANGE:
3386 case TYPE_CODE_MEMBERPTR:
3387 store_unsigned_integer (buf, len, byte_order, num);
3388 break;
3389
3390 case TYPE_CODE_REF:
3391 case TYPE_CODE_RVALUE_REF:
3392 case TYPE_CODE_PTR:
3393 store_typed_address (buf, type, (CORE_ADDR) num);
3394 break;
3395
3396 case TYPE_CODE_FLT:
3397 case TYPE_CODE_DECFLOAT:
3398 target_float_from_ulongest (buf, type, num);
3399 break;
3400
3401 default:
3402 error (_("Unexpected type (%d) encountered "
3403 "for unsigned integer constant."),
3404 TYPE_CODE (type));
3405 }
3406 }
3407
3408
3409 /* Convert C numbers into newly allocated values. */
3410
3411 struct value *
3412 value_from_longest (struct type *type, LONGEST num)
3413 {
3414 struct value *val = allocate_value (type);
3415
3416 pack_long (value_contents_raw (val), type, num);
3417 return val;
3418 }
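
/* Example (editor's sketch): building an integer value for an
   architecture GDBARCH the caller already has in hand, and reading
   it back:

       struct value *v
         = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
       LONGEST l = value_as_long (v);

   Here L ends up as 42: value_as_long simply reverses the pack_long
   step for integer types.  */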
3419
3420
3421 /* Convert C unsigned numbers into newly allocated values. */
3422
3423 struct value *
3424 value_from_ulongest (struct type *type, ULONGEST num)
3425 {
3426 struct value *val = allocate_value (type);
3427
3428 pack_unsigned_long (value_contents_raw (val), type, num);
3429
3430 return val;
3431 }
3432
3433
3434 /* Create a value representing a pointer of type TYPE to the address
3435 ADDR. */
3436
3437 struct value *
3438 value_from_pointer (struct type *type, CORE_ADDR addr)
3439 {
3440 struct value *val = allocate_value (type);
3441
3442 store_typed_address (value_contents_raw (val),
3443 check_typedef (type), addr);
3444 return val;
3445 }
3446
3447 /* Create and return a value object of TYPE containing the value D. The
3448 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3449 it is converted to target format. */
3450
3451 struct value *
3452 value_from_host_double (struct type *type, double d)
3453 {
3454 struct value *value = allocate_value (type);
3455 gdb_assert (TYPE_CODE (type) == TYPE_CODE_FLT);
3456 target_float_from_host_double (value_contents_raw (value),
3457 value_type (value), d);
3458 return value;
3459 }
3460
3461 /* Create a value of type TYPE whose contents come from VALADDR, if it
3462 is non-null, and whose memory address (in the inferior) is
3463 ADDRESS. The type of the created value may differ from the passed
3464 type TYPE. Make sure to retrieve the value's new type after this call.
3465 Note that TYPE is not passed through resolve_dynamic_type; this is
3466 a special API intended for use only by Ada. */
3467
3468 struct value *
3469 value_from_contents_and_address_unresolved (struct type *type,
3470 const gdb_byte *valaddr,
3471 CORE_ADDR address)
3472 {
3473 struct value *v;
3474
3475 if (valaddr == NULL)
3476 v = allocate_value_lazy (type);
3477 else
3478 v = value_from_contents (type, valaddr);
3479 VALUE_LVAL (v) = lval_memory;
3480 set_value_address (v, address);
3481 return v;
3482 }
3483
3484 /* Create a value of type TYPE whose contents come from VALADDR, if it
3485 is non-null, and whose memory address (in the inferior) is
3486 ADDRESS. The type of the created value may differ from the passed
3487 type TYPE. Make sure to retrieve the value's new type after this call. */
3488
3489 struct value *
3490 value_from_contents_and_address (struct type *type,
3491 const gdb_byte *valaddr,
3492 CORE_ADDR address)
3493 {
3494 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3495 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3496 struct value *v;
3497
3498 if (valaddr == NULL)
3499 v = allocate_value_lazy (resolved_type);
3500 else
3501 v = value_from_contents (resolved_type, valaddr);
3502 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3503 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3504 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3505 VALUE_LVAL (v) = lval_memory;
3506 set_value_address (v, address);
3507 return v;
3508 }
3509
3510 /* Create a value of type TYPE holding the contents CONTENTS.
3511 The new value is `not_lval'. */
3512
3513 struct value *
3514 value_from_contents (struct type *type, const gdb_byte *contents)
3515 {
3516 struct value *result;
3517
3518 result = allocate_value (type);
3519 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3520 return result;
3521 }
3522
3523 /* Extract a value from the history file. Input will be of the form
3524 $digits or $$digits. See block comment above 'write_dollar_variable'
3525 for details. */
3526
3527 struct value *
3528 value_from_history_ref (const char *h, const char **endp)
3529 {
3530 int index, len;
3531
3532 if (h[0] == '$')
3533 len = 1;
3534 else
3535 return NULL;
3536
3537 if (h[1] == '$')
3538 len = 2;
3539
3540 /* Find length of numeral string. */
3541 for (; isdigit (h[len]); len++)
3542 ;
3543
3544 /* Make sure numeral string is not part of an identifier. */
3545 if (h[len] == '_' || isalpha (h[len]))
3546 return NULL;
3547
3548 /* Now collect the index value. */
3549 if (h[1] == '$')
3550 {
3551 if (len == 2)
3552 {
3553 /* For some bizarre reason, "$$" is equivalent to "$$1",
3554 rather than to "$$0" as it ought to be! */
3555 index = -1;
3556 *endp += len;
3557 }
3558 else
3559 {
3560 char *local_end;
3561
3562 index = -strtol (&h[2], &local_end, 10);
3563 *endp = local_end;
3564 }
3565 }
3566 else
3567 {
3568 if (len == 1)
3569 {
3570 /* "$" is equivalent to "$0". */
3571 index = 0;
3572 *endp += len;
3573 }
3574 else
3575 {
3576 char *local_end;
3577
3578 index = strtol (&h[1], &local_end, 10);
3579 *endp = local_end;
3580 }
3581 }
3582
3583 return access_value_history (index);
3584 }
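
/* Examples (editor's note) of the forms accepted above: "$7" yields
   index 7, "$" yields index 0 and "$$" yields index -1 (equivalent
   to "$$1", per the quirk noted above), while "$$3" yields index -3.
   The index is then handed to access_value_history, where zero and
   negative indexes are taken relative to the most recent entry.  A
   string such as "$7foo" is rejected because the digits are followed
   by an identifier character.  */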
3585
3586 /* Get the component value (offset by OFFSET bytes) of a struct or
3587 union WHOLE. Component's type is TYPE. */
3588
3589 struct value *
3590 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3591 {
3592 struct value *v;
3593
3594 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3595 v = allocate_value_lazy (type);
3596 else
3597 {
3598 v = allocate_value (type);
3599 value_contents_copy (v, value_embedded_offset (v),
3600 whole, value_embedded_offset (whole) + offset,
3601 type_length_units (type));
3602 }
3603 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3604 set_value_component_location (v, whole);
3605
3606 return v;
3607 }
3608
3609 struct value *
3610 coerce_ref_if_computed (const struct value *arg)
3611 {
3612 const struct lval_funcs *funcs;
3613
3614 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3615 return NULL;
3616
3617 if (value_lval_const (arg) != lval_computed)
3618 return NULL;
3619
3620 funcs = value_computed_funcs (arg);
3621 if (funcs->coerce_ref == NULL)
3622 return NULL;
3623
3624 return funcs->coerce_ref (arg);
3625 }
3626
3627 /* Look at value.h for description. */
3628
3629 struct value *
3630 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3631 const struct type *original_type,
3632 const struct value *original_value)
3633 {
3634 /* Re-adjust type. */
3635 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3636
3637 /* Add embedding info. */
3638 set_value_enclosing_type (value, enc_type);
3639 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3640
3641 /* We may be pointing to an object of some derived type. */
3642 return value_full_object (value, NULL, 0, 0, 0);
3643 }
3644
3645 struct value *
3646 coerce_ref (struct value *arg)
3647 {
3648 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3649 struct value *retval;
3650 struct type *enc_type;
3651
3652 retval = coerce_ref_if_computed (arg);
3653 if (retval)
3654 return retval;
3655
3656 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3657 return arg;
3658
3659 enc_type = check_typedef (value_enclosing_type (arg));
3660 enc_type = TYPE_TARGET_TYPE (enc_type);
3661
3662 retval = value_at_lazy (enc_type,
3663 unpack_pointer (value_type (arg),
3664 value_contents (arg)));
3665 enc_type = value_type (retval);
3666 return readjust_indirect_value_type (retval, enc_type,
3667 value_type_arg_tmp, arg);
3668 }
3669
3670 struct value *
3671 coerce_array (struct value *arg)
3672 {
3673 struct type *type;
3674
3675 arg = coerce_ref (arg);
3676 type = check_typedef (value_type (arg));
3677
3678 switch (TYPE_CODE (type))
3679 {
3680 case TYPE_CODE_ARRAY:
3681 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3682 arg = value_coerce_array (arg);
3683 break;
3684 case TYPE_CODE_FUNC:
3685 arg = value_coerce_function (arg);
3686 break;
3687 }
3688 return arg;
3689 }
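
/* Example (editor's note): given a C variable declared as
   "int a[4]", coerce_array turns the array value into a pointer to
   its first element (when the language has C-style arrays and the
   type is not a vector), mirroring C's usual array-to-pointer decay;
   a value of function type is likewise coerced to a pointer to the
   function, and any reference wrapper has already been stripped by
   the coerce_ref call above.  */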
3690 \f
3691
3692 /* Return the return value convention that will be used for the
3693 specified type. */
3694
3695 enum return_value_convention
3696 struct_return_convention (struct gdbarch *gdbarch,
3697 struct value *function, struct type *value_type)
3698 {
3699 enum type_code code = TYPE_CODE (value_type);
3700
3701 if (code == TYPE_CODE_ERROR)
3702 error (_("Function return type unknown."));
3703
3704 /* Probe the architecture for the return-value convention. */
3705 return gdbarch_return_value (gdbarch, function, value_type,
3706 NULL, NULL, NULL);
3707 }
3708
3709 /* Return true if the function returning the specified type is using
3710 the convention of returning structures in memory (passing in the
3711 address as a hidden first parameter). */
3712
3713 int
3714 using_struct_return (struct gdbarch *gdbarch,
3715 struct value *function, struct type *value_type)
3716 {
3717 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3718 /* A void return value is never in memory. See also corresponding
3719 code in "print_return_value". */
3720 return 0;
3721
3722 return (struct_return_convention (gdbarch, function, value_type)
3723 != RETURN_VALUE_REGISTER_CONVENTION);
3724 }
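
/* Usage sketch (hypothetical inferior-call code): the result decides
   whether space for the return value must be reserved in the
   inferior and its address passed as a hidden first argument:

     if (using_struct_return (gdbarch, function, value_type))
       ... reserve target memory for the result and pass its address
	   as a hidden first argument ...
     else
       ... recover the result from registers afterwards ...  */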
3725
3726 /* Set the initialized field in a value struct. */
3727
3728 void
3729 set_value_initialized (struct value *val, int status)
3730 {
3731 val->initialized = status;
3732 }
3733
3734 /* Return the initialized field in a value struct. */
3735
3736 int
3737 value_initialized (const struct value *val)
3738 {
3739 return val->initialized;
3740 }
3741
3742 /* Helper for value_fetch_lazy when the value is a bitfield. */
3743
3744 static void
3745 value_fetch_lazy_bitfield (struct value *val)
3746 {
3747 gdb_assert (value_bitsize (val) != 0);
3748
3749 /* To read a lazy bitfield, read the entire enclosing value. This
3750 prevents reading the same block of (possibly volatile) memory once
3751 per bitfield. It would be even better to read only the containing
3752 word, but we have no way to record that just specific bits of a
3753 value have been fetched. */
3754 struct value *parent = value_parent (val);
3755
3756 if (value_lazy (parent))
3757 value_fetch_lazy (parent);
3758
3759 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3760 value_contents_for_printing (parent),
3761 value_offset (val), parent);
3762 }
3763
3764 /* Helper for value_fetch_lazy when the value is in memory. */
3765
3766 static void
3767 value_fetch_lazy_memory (struct value *val)
3768 {
3769 gdb_assert (VALUE_LVAL (val) == lval_memory);
3770
3771 CORE_ADDR addr = value_address (val);
3772 struct type *type = check_typedef (value_enclosing_type (val));
3773
3774 if (TYPE_LENGTH (type))
3775 read_value_memory (val, 0, value_stack (val),
3776 addr, value_contents_all_raw (val),
3777 type_length_units (type));
3778 }
3779
3780 /* Helper for value_fetch_lazy when the value is in a register. */
3781
3782 static void
3783 value_fetch_lazy_register (struct value *val)
3784 {
3785 struct frame_info *next_frame;
3786 int regnum;
3787 struct type *type = check_typedef (value_type (val));
3788 struct value *new_val = val, *mark = value_mark ();
3789
3790 /* Offsets are not supported here; lazy register values must
3791 refer to the entire register. */
3792 gdb_assert (value_offset (val) == 0);
3793
3794 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3795 {
3796 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3797
3798 next_frame = frame_find_by_id (next_frame_id);
3799 regnum = VALUE_REGNUM (new_val);
3800
3801 gdb_assert (next_frame != NULL);
3802
3803 /* Convertible register routines are used for multi-register
3804 values and for interpretation in different types
3805 (e.g. float or int from a double register). Lazy
3806 register values should have the register's natural type,
3807 so they do not apply. */
3808 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3809 regnum, type));
3810
3811 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3812 Since a "->next" operation was performed when setting
3813 this field, we do not need to perform a "next" operation
3814 again when unwinding the register. That's why
3815 frame_unwind_register_value() is called here instead of
3816 get_frame_register_value(). */
3817 new_val = frame_unwind_register_value (next_frame, regnum);
3818
3819 /* If we get another lazy lval_register value, it means the
3820 register is found by reading it from NEXT_FRAME's next frame.
3821 frame_unwind_register_value should never return a value with
3822 the frame id pointing to NEXT_FRAME. If it does, it means we
3823 either have two consecutive frames with the same frame id
3824 in the frame chain, or some code is trying to unwind
3825 behind get_prev_frame's back (e.g., a frame unwind
3826 sniffer trying to unwind), bypassing its validations. In
3827 any case, it should always be an internal error to end up
3828 in this situation. */
3829 if (VALUE_LVAL (new_val) == lval_register
3830 && value_lazy (new_val)
3831 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3832 internal_error (__FILE__, __LINE__,
3833 _("infinite loop while fetching a register"));
3834 }
3835
3836 /* If it's still lazy (for instance, a saved register on the
3837 stack), fetch it. */
3838 if (value_lazy (new_val))
3839 value_fetch_lazy (new_val);
3840
3841 /* Copy the contents and the unavailability/optimized-out
3842 meta-data from NEW_VAL to VAL. */
3843 set_value_lazy (val, 0);
3844 value_contents_copy (val, value_embedded_offset (val),
3845 new_val, value_embedded_offset (new_val),
3846 type_length_units (type));
3847
3848 if (frame_debug)
3849 {
3850 struct gdbarch *gdbarch;
3851 struct frame_info *frame;
3852 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3853 so that the frame level will be shown correctly. */
3854 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3855 regnum = VALUE_REGNUM (val);
3856 gdbarch = get_frame_arch (frame);
3857
3858 fprintf_unfiltered (gdb_stdlog,
3859 "{ value_fetch_lazy "
3860 "(frame=%d,regnum=%d(%s),...) ",
3861 frame_relative_level (frame), regnum,
3862 user_reg_map_regnum_to_name (gdbarch, regnum));
3863
3864 fprintf_unfiltered (gdb_stdlog, "->");
3865 if (value_optimized_out (new_val))
3866 {
3867 fprintf_unfiltered (gdb_stdlog, " ");
3868 val_print_optimized_out (new_val, gdb_stdlog);
3869 }
3870 else
3871 {
3872 int i;
3873 const gdb_byte *buf = value_contents (new_val);
3874
3875 if (VALUE_LVAL (new_val) == lval_register)
3876 fprintf_unfiltered (gdb_stdlog, " register=%d",
3877 VALUE_REGNUM (new_val));
3878 else if (VALUE_LVAL (new_val) == lval_memory)
3879 fprintf_unfiltered (gdb_stdlog, " address=%s",
3880 paddress (gdbarch,
3881 value_address (new_val)));
3882 else
3883 fprintf_unfiltered (gdb_stdlog, " computed");
3884
3885 fprintf_unfiltered (gdb_stdlog, " bytes=");
3886 fprintf_unfiltered (gdb_stdlog, "[");
3887 for (i = 0; i < register_size (gdbarch, regnum); i++)
3888 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3889 fprintf_unfiltered (gdb_stdlog, "]");
3890 }
3891
3892 fprintf_unfiltered (gdb_stdlog, " }\n");
3893 }
3894
3895 /* Dispose of the intermediate values. This prevents
3896 watchpoints from trying to watch the saved frame pointer. */
3897 value_free_to_mark (mark);
3898 }
3899
3900 /* Load the actual content of a lazy value. Fetch the data from the
3901 user's process and clear the lazy flag to indicate that the data in
3902 the buffer is valid.
3903
3904 If the value is zero-length, we avoid calling read_memory, which
3905 would abort. We mark the value as fetched anyway -- all 0 bytes of
3906 it. */
3907
3908 void
3909 value_fetch_lazy (struct value *val)
3910 {
3911 gdb_assert (value_lazy (val));
3912 allocate_value_contents (val);
3913 /* A value is either lazy, or fully fetched. The
3914 availability/validity is only established as we try to fetch a
3915 value. */
3916 gdb_assert (val->optimized_out.empty ());
3917 gdb_assert (val->unavailable.empty ());
3918 if (value_bitsize (val))
3919 value_fetch_lazy_bitfield (val);
3920 else if (VALUE_LVAL (val) == lval_memory)
3921 value_fetch_lazy_memory (val);
3922 else if (VALUE_LVAL (val) == lval_register)
3923 value_fetch_lazy_register (val);
3924 else if (VALUE_LVAL (val) == lval_computed
3925 && value_computed_funcs (val)->read != NULL)
3926 value_computed_funcs (val)->read (val);
3927 else
3928 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3929
3930 set_value_lazy (val, 0);
3931 }
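
/* A minimal sketch of the usual life cycle (TYPE and ADDR are
   hypothetical):

     struct value *v = value_at_lazy (type, addr);
     ...
     if (value_lazy (v))
       value_fetch_lazy (v);
     const gdb_byte *bytes = value_contents (v);

   Creating the value touches no target memory; the read happens on
   the first fetch.  Most callers never call value_fetch_lazy
   directly, since value_contents and friends fetch on demand.  */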
3932
3933 /* Implementation of the convenience function $_isvoid. */
3934
3935 static struct value *
3936 isvoid_internal_fn (struct gdbarch *gdbarch,
3937 const struct language_defn *language,
3938 void *cookie, int argc, struct value **argv)
3939 {
3940 int ret;
3941
3942 if (argc != 1)
3943 error (_("You must provide one argument for $_isvoid."));
3944
3945 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3946
3947 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3948 }
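
/* Illustrative CLI session ($whatever is a hypothetical, never-set
   convenience variable; the value-history numbers are arbitrary):

     (gdb) print $_isvoid ($whatever)
     $1 = 1
     (gdb) print $_isvoid (42)
     $2 = 0

   An unset convenience variable has void type, so $_isvoid is handy
   for checking whether such a variable has been given a value.  */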
3949
3950 /* Implementation of the convenience function $_creal. Extracts the
3951 real part from a complex number. */
3952
3953 static struct value *
3954 creal_internal_fn (struct gdbarch *gdbarch,
3955 const struct language_defn *language,
3956 void *cookie, int argc, struct value **argv)
3957 {
3958 if (argc != 1)
3959 error (_("You must provide one argument for $_creal."));
3960
3961 value *cval = argv[0];
3962 type *ctype = check_typedef (value_type (cval));
3963 if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
3964 error (_("expected a complex number"));
3965 return value_from_component (cval, TYPE_TARGET_TYPE (ctype), 0);
3966 }
3967
3968 /* Implementation of the convenience function $_cimag. Extracts the
3969 imaginary part from a complex number. */
3970
3971 static struct value *
3972 cimag_internal_fn (struct gdbarch *gdbarch,
3973 const struct language_defn *language,
3974 void *cookie, int argc,
3975 struct value **argv)
3976 {
3977 if (argc != 1)
3978 error (_("You must provide one argument for $_cimag."));
3979
3980 value *cval = argv[0];
3981 type *ctype = check_typedef (value_type (cval));
3982 if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
3983 error (_("expected a complex number"));
3984 return value_from_component (cval, TYPE_TARGET_TYPE (ctype),
3985 TYPE_LENGTH (TYPE_TARGET_TYPE (ctype)));
3986 }
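
/* Illustrative CLI session, assuming a program variable Z of a
   complex type:

     (gdb) print $_creal (z)
     (gdb) print $_cimag (z)

   Each result has the complex type's component type, e.g. double for
   a "double complex" Z.  */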
3987
3988 #if GDB_SELF_TEST
3989 namespace selftests
3990 {
3991
3992 /* Test the ranges_contain function. */
3993
3994 static void
3995 test_ranges_contain ()
3996 {
3997 std::vector<range> ranges;
3998 range r;
3999
4000 /* [10, 14] */
4001 r.offset = 10;
4002 r.length = 5;
4003 ranges.push_back (r);
4004
4005 /* [20, 24] */
4006 r.offset = 20;
4007 r.length = 5;
4008 ranges.push_back (r);
4009
4010 /* [2, 6] */
4011 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4012 /* [9, 13] */
4013 SELF_CHECK (ranges_contain (ranges, 9, 5));
4014 /* [10, 11] */
4015 SELF_CHECK (ranges_contain (ranges, 10, 2));
4016 /* [10, 14] */
4017 SELF_CHECK (ranges_contain (ranges, 10, 5));
4018 /* [13, 18] */
4019 SELF_CHECK (ranges_contain (ranges, 13, 6));
4020 /* [14, 18] */
4021 SELF_CHECK (ranges_contain (ranges, 14, 5));
4022 /* [15, 18] */
4023 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4024 /* [16, 19] */
4025 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4026 /* [16, 21] */
4027 SELF_CHECK (ranges_contain (ranges, 16, 6));
4028 /* [21, 21] */
4029 SELF_CHECK (ranges_contain (ranges, 21, 1));
4030 /* [21, 25] */
4031 SELF_CHECK (ranges_contain (ranges, 21, 5));
4032 /* [26, 28] */
4033 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4034 }
4035
4036 /* Check that RANGES contains the same ranges as EXPECTED. */
4037
4038 static bool
4039 check_ranges_vector (gdb::array_view<const range> ranges,
4040 gdb::array_view<const range> expected)
4041 {
4042 return ranges == expected;
4043 }
4044
4045 /* Test the insert_into_bit_range_vector function. */
4046
4047 static void
4048 test_insert_into_bit_range_vector ()
4049 {
4050 std::vector<range> ranges;
4051
4052 /* [10, 14] */
4053 {
4054 insert_into_bit_range_vector (&ranges, 10, 5);
4055 static const range expected[] = {
4056 {10, 5}
4057 };
4058 SELF_CHECK (check_ranges_vector (ranges, expected));
4059 }
4060
4061 /* [10, 14] */
4062 {
4063 insert_into_bit_range_vector (&ranges, 11, 4);
4064 static const range expected = {10, 5};
4065 SELF_CHECK (check_ranges_vector (ranges, expected));
4066 }
4067
4068 /* [10, 14] [20, 24] */
4069 {
4070 insert_into_bit_range_vector (&ranges, 20, 5);
4071 static const range expected[] = {
4072 {10, 5},
4073 {20, 5},
4074 };
4075 SELF_CHECK (check_ranges_vector (ranges, expected));
4076 }
4077
4078 /* [10, 14] [17, 24] */
4079 {
4080 insert_into_bit_range_vector (&ranges, 17, 5);
4081 static const range expected[] = {
4082 {10, 5},
4083 {17, 8},
4084 };
4085 SELF_CHECK (check_ranges_vector (ranges, expected));
4086 }
4087
4088 /* [2, 8] [10, 14] [17, 24] */
4089 {
4090 insert_into_bit_range_vector (&ranges, 2, 7);
4091 static const range expected[] = {
4092 {2, 7},
4093 {10, 5},
4094 {17, 8},
4095 };
4096 SELF_CHECK (check_ranges_vector (ranges, expected));
4097 }
4098
4099 /* [2, 14] [17, 24] */
4100 {
4101 insert_into_bit_range_vector (&ranges, 9, 1);
4102 static const range expected[] = {
4103 {2, 13},
4104 {17, 8},
4105 };
4106 SELF_CHECK (check_ranges_vector (ranges, expected));
4107 }
4108
4109 /* [2, 14] [17, 24] */
4110 {
4111 insert_into_bit_range_vector (&ranges, 9, 1);
4112 static const range expected[] = {
4113 {2, 13},
4114 {17, 8},
4115 };
4116 SELF_CHECK (check_ranges_vector (ranges, expected));
4117 }
4118
4119 /* [2, 33] */
4120 {
4121 insert_into_bit_range_vector (&ranges, 4, 30);
4122 static const range expected = {2, 32};
4123 SELF_CHECK (check_ranges_vector (ranges, expected));
4124 }
4125 }
4126
4127 } /* namespace selftests */
4128 #endif /* GDB_SELF_TEST */
4129
4130 void
4131 _initialize_values (void)
4132 {
4133 add_cmd ("convenience", no_class, show_convenience, _("\
4134 Debugger convenience (\"$foo\") variables and functions.\n\
4135 Convenience variables are created when you assign them values;\n\
4136 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4137 \n\
4138 A few convenience variables are given values automatically:\n\
4139 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4140 \"$__\" holds the contents of the last address examined with \"x\"."
4141 #ifdef HAVE_PYTHON
4142 "\n\n\
4143 Convenience functions are defined via the Python API."
4144 #endif
4145 ), &showlist);
4146 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4147
4148 add_cmd ("values", no_set_class, show_values, _("\
4149 Elements of value history around item number IDX (or last ten)."),
4150 &showlist);
4151
4152 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4153 Initialize a convenience variable if necessary.\n\
4154 init-if-undefined VARIABLE = EXPRESSION\n\
4155 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4156 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4157 VARIABLE is already initialized."));
4158
4159 add_prefix_cmd ("function", no_class, function_command, _("\
4160 Placeholder command for showing help on convenience functions."),
4161 &functionlist, "function ", 0, &cmdlist);
4162
4163 add_internal_function ("_isvoid", _("\
4164 Check whether an expression is void.\n\
4165 Usage: $_isvoid (expression)\n\
4166 Return 1 if the expression is void, zero otherwise."),
4167 isvoid_internal_fn, NULL);
4168
4169 add_internal_function ("_creal", _("\
4170 Extract the real part of a complex number.\n\
4171 Usage: $_creal (expression)\n\
4172 Return the real part of a complex number; the type depends on the\n\
4173 type of the complex number."),
4174 creal_internal_fn, NULL);
4175
4176 add_internal_function ("_cimag", _("\
4177 Extract the imaginary part of a complex number.\n\
4178 Usage: $_cimag (expression)\n\
4179 Return the imaginary part of a complex number; the type depends on the\n\
4180 type of the complex number."),
4181 cimag_internal_fn, NULL);
4182
4183 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4184 class_support, &max_value_size, _("\
4185 Set maximum size of value gdb will load from the inferior."), _("\
4186 Show maximum size of value gdb will load from the inferior."), _("\
4187 Use this to control the maximum size, in bytes, of a value that gdb\n\
4188 will load from the inferior. Setting this value to 'unlimited'\n\
4189 disables checking.\n\
4190 Setting this does not invalidate already allocated values; it only\n\
4191 prevents future values, larger than this size, from being allocated."),
4192 set_max_value_size,
4193 show_max_value_size,
4194 &setlist, &showlist);
4195 #if GDB_SELF_TEST
4196 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4197 selftests::register_test ("insert_into_bit_range_vector",
4198 selftests::test_insert_into_bit_range_vector);
4199 #endif
4200 }
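
/* Illustrative CLI interactions with the commands registered above
   ($retries is an arbitrary example variable name):

     (gdb) init-if-undefined $retries = 3
     (gdb) show convenience
     (gdb) set max-value-size unlimited  */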
4201
4202 /* See value.h. */
4203
4204 void
4205 finalize_values ()
4206 {
4207 all_values.clear ();
4208 }