Fix copy-pasto, allocate objfile_per_bfd_storage with obstack_new
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "selftest.h"
45 #include "common/array-view.h"
46
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Invoked when the function is called from an
     expression.  */
  internal_function_fn handler;

  /* User data for the handler.  Opaque here; owned by whoever
     registered the function.  */
  void *cookie;
};
61
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  LONGEST offset;

  /* Length of the range.  */
  LONGEST length;

  /* Returns true if THIS is strictly less than OTHER, useful for
     searching.  We keep ranges sorted by offset and coalesce
     overlapping and contiguous ranges, so this just compares the
     starting offset.  */

  bool operator< (const range &other) const
  {
    return offset < other.offset;
  }

  /* Returns true if THIS is equal to OTHER (same offset and same
     length).  */
  bool operator== (const range &other) const
  {
    return offset == other.offset && length == other.length;
  }
};
88
89 /* Returns true if the ranges defined by [offset1, offset1+len1) and
90 [offset2, offset2+len2) overlap. */
91
92 static int
93 ranges_overlap (LONGEST offset1, LONGEST len1,
94 LONGEST offset2, LONGEST len2)
95 {
96 ULONGEST h, l;
97
98 l = std::max (offset1, offset2);
99 h = std::min (offset1 + len1, offset2 + len2);
100 return (l < h);
101 }
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
108 LONGEST length)
109 {
110 range what;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range afterall), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we need to check if the I range overlaps the I range itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146
147 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
148
149 if (i > ranges.begin ())
150 {
151 const struct range &bef = *(i - 1);
152
153 if (ranges_overlap (bef.offset, bef.length, offset, length))
154 return 1;
155 }
156
157 if (i < ranges.end ())
158 {
159 const struct range &r = *i;
160
161 if (ranges_overlap (r.offset, r.length, offset, length))
162 return 1;
163 }
164
165 return 0;
166 }
167
/* Command list for the "function" subcommands (internal convenience
   functions registered below).  */
static struct cmd_list_element *functionlist;
169
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Build a value of type TYPE_.  The value starts out lazy (no
     contents buffer yet), modifiable, initialized, and not from the
     stack; its enclosing type is initially TYPE_ itself.  */
  explicit value (struct type *type_)
    : modifiable (1),
      lazy (1),
      initialized (1),
      stack (0),
      type (type_),
      enclosing_type (type_)
  {
    location.address = 0;
  }

  ~value ()
  {
    /* A computed lvalue may own a closure; give the provider a
       chance to release it.  */
    if (VALUE_LVAL (this) == lval_computed)
      {
	const struct lval_funcs *funcs = location.computed.funcs;

	if (funcs->free_closure)
	  funcs->free_closure (this);
      }
    else if (VALUE_LVAL (this) == lval_xcallable)
      delete location.xm_worker;
  }

  DISABLE_COPY_AND_ASSIGN (value);

  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval = not_lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
	 If the register value is found relative to frame F, then the
	 frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset = 0;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize = 0;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  LONGEST bitpos = 0;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count = 1;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  value_ref_ptr parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset = 0;
  LONGEST pointed_to_offset = 0;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb::unique_xmalloc_ptr<gdb_byte> contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  std::vector<range> unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  std::vector<range> optimized_out;
};
360
361 /* See value.h. */
362
363 struct gdbarch *
364 get_value_arch (const struct value *value)
365 {
366 return get_type_arch (value_type (value));
367 }
368
369 int
370 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
371 {
372 gdb_assert (!value->lazy);
373
374 return !ranges_contain (value->unavailable, offset, length);
375 }
376
377 int
378 value_bytes_available (const struct value *value,
379 LONGEST offset, LONGEST length)
380 {
381 return value_bits_available (value,
382 offset * TARGET_CHAR_BIT,
383 length * TARGET_CHAR_BIT);
384 }
385
386 int
387 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
388 {
389 gdb_assert (!value->lazy);
390
391 return ranges_contain (value->optimized_out, bit_offset, bit_length);
392 }
393
394 int
395 value_entirely_available (struct value *value)
396 {
397 /* We can only tell whether the whole value is available when we try
398 to read it. */
399 if (value->lazy)
400 value_fetch_lazy (value);
401
402 if (value->unavailable.empty ())
403 return 1;
404 return 0;
405 }
406
407 /* Returns true if VALUE is entirely covered by RANGES. If the value
408 is lazy, it'll be read now. Note that RANGE is a pointer to
409 pointer because reading the value might change *RANGE. */
410
411 static int
412 value_entirely_covered_by_range_vector (struct value *value,
413 const std::vector<range> &ranges)
414 {
415 /* We can only tell whether the whole value is optimized out /
416 unavailable when we try to read it. */
417 if (value->lazy)
418 value_fetch_lazy (value);
419
420 if (ranges.size () == 1)
421 {
422 const struct range &t = ranges[0];
423
424 if (t.offset == 0
425 && t.length == (TARGET_CHAR_BIT
426 * TYPE_LENGTH (value_enclosing_type (value))))
427 return 1;
428 }
429
430 return 0;
431 }
432
/* Return nonzero if every bit of VALUE is unavailable; fetches VALUE
   if it is still lazy.  */
int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->unavailable);
}
438
/* Return nonzero if every bit of VALUE is optimized out; fetches
   VALUE if it is still lazy.  */
int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->optimized_out);
}
444
445 /* Insert into the vector pointed to by VECTORP the bit range starting of
446 OFFSET bits, and extending for the next LENGTH bits. */
447
448 static void
449 insert_into_bit_range_vector (std::vector<range> *vectorp,
450 LONGEST offset, LONGEST length)
451 {
452 range newr;
453
454 /* Insert the range sorted. If there's overlap or the new range
455 would be contiguous with an existing range, merge. */
456
457 newr.offset = offset;
458 newr.length = length;
459
460 /* Do a binary search for the position the given range would be
461 inserted if we only considered the starting OFFSET of ranges.
462 Call that position I. Since we also have LENGTH to care for
463 (this is a range afterall), we need to check if the _previous_
464 range overlaps the I range. E.g., calling R the new range:
465
466 #1 - overlaps with previous
467
468 R
469 |-...-|
470 |---| |---| |------| ... |--|
471 0 1 2 N
472
473 I=1
474
475 In the case #1 above, the binary search would return `I=1',
476 meaning, this OFFSET should be inserted at position 1, and the
477 current position 1 should be pushed further (and become 2). But,
478 note that `0' overlaps with R, so we want to merge them.
479
480 A similar consideration needs to be taken if the new range would
481 be contiguous with the previous range:
482
483 #2 - contiguous with previous
484
485 R
486 |-...-|
487 |--| |---| |------| ... |--|
488 0 1 2 N
489
490 I=1
491
492 If there's no overlap with the previous range, as in:
493
494 #3 - not overlapping and not contiguous
495
496 R
497 |-...-|
498 |--| |---| |------| ... |--|
499 0 1 2 N
500
501 I=1
502
503 or if I is 0:
504
505 #4 - R is the range with lowest offset
506
507 R
508 |-...-|
509 |--| |---| |------| ... |--|
510 0 1 2 N
511
512 I=0
513
514 ... we just push the new range to I.
515
516 All the 4 cases above need to consider that the new range may
517 also overlap several of the ranges that follow, or that R may be
518 contiguous with the following range, and merge. E.g.,
519
520 #5 - overlapping following ranges
521
522 R
523 |------------------------|
524 |--| |---| |------| ... |--|
525 0 1 2 N
526
527 I=0
528
529 or:
530
531 R
532 |-------|
533 |--| |---| |------| ... |--|
534 0 1 2 N
535
536 I=1
537
538 */
539
540 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
541 if (i > vectorp->begin ())
542 {
543 struct range &bef = *(i - 1);
544
545 if (ranges_overlap (bef.offset, bef.length, offset, length))
546 {
547 /* #1 */
548 ULONGEST l = std::min (bef.offset, offset);
549 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
550
551 bef.offset = l;
552 bef.length = h - l;
553 i--;
554 }
555 else if (offset == bef.offset + bef.length)
556 {
557 /* #2 */
558 bef.length += length;
559 i--;
560 }
561 else
562 {
563 /* #3 */
564 i = vectorp->insert (i, newr);
565 }
566 }
567 else
568 {
569 /* #4 */
570 i = vectorp->insert (i, newr);
571 }
572
573 /* Check whether the ranges following the one we've just added or
574 touched can be folded in (#5 above). */
575 if (i != vectorp->end () && i + 1 < vectorp->end ())
576 {
577 int removed = 0;
578 auto next = i + 1;
579
580 /* Get the range we just touched. */
581 struct range &t = *i;
582 removed = 0;
583
584 i = next;
585 for (; i < vectorp->end (); i++)
586 {
587 struct range &r = *i;
588 if (r.offset <= t.offset + t.length)
589 {
590 ULONGEST l, h;
591
592 l = std::min (t.offset, r.offset);
593 h = std::max (t.offset + t.length, r.offset + r.length);
594
595 t.offset = l;
596 t.length = h - l;
597
598 removed++;
599 }
600 else
601 {
602 /* If we couldn't merge this one, we won't be able to
603 merge following ones either, since the ranges are
604 always sorted by OFFSET. */
605 break;
606 }
607 }
608
609 if (removed != 0)
610 vectorp->erase (next, next + removed);
611 }
612 }
613
/* Mark the bit range [OFFSET, OFFSET+LENGTH) of VALUE's contents as
   unavailable.  */
void
mark_value_bits_unavailable (struct value *value,
			     LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
620
/* Byte-granularity wrapper around mark_value_bits_unavailable.  */
void
mark_value_bytes_unavailable (struct value *value,
			      LONGEST offset, LONGEST length)
{
  mark_value_bits_unavailable (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
629
630 /* Find the first range in RANGES that overlaps the range defined by
631 OFFSET and LENGTH, starting at element POS in the RANGES vector,
632 Returns the index into RANGES where such overlapping range was
633 found, or -1 if none was found. */
634
635 static int
636 find_first_range_overlap (const std::vector<range> *ranges, int pos,
637 LONGEST offset, LONGEST length)
638 {
639 int i;
640
641 for (i = pos; i < ranges->size (); i++)
642 {
643 const range &r = (*ranges)[i];
644 if (ranges_overlap (r.offset, r.length, offset, length))
645 return i;
646 }
647
648 return -1;
649 }
650
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  /* Step 1: compare the leading partial byte, if any.  */
  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The compared region ends inside this byte; drop the mask
	     bits beyond LENGTH_BITS.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  /* Step 2: compare the trailing partial byte, if any.  */
  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  /* Step 3: the remainder is whole, byte-aligned bytes.  */
  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
751
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
765
766 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
767 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
768 ranges starting at OFFSET2 bits. Return true if the ranges match
769 and fill in *L and *H with the overlapping window relative to
770 (both) OFFSET1 or OFFSET2. */
771
772 static int
773 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
774 struct ranges_and_idx *rp2,
775 LONGEST offset1, LONGEST offset2,
776 LONGEST length, ULONGEST *l, ULONGEST *h)
777 {
778 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
779 offset1, length);
780 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
781 offset2, length);
782
783 if (rp1->idx == -1 && rp2->idx == -1)
784 {
785 *l = length;
786 *h = length;
787 return 1;
788 }
789 else if (rp1->idx == -1 || rp2->idx == -1)
790 return 0;
791 else
792 {
793 const range *r1, *r2;
794 ULONGEST l1, h1;
795 ULONGEST l2, h2;
796
797 r1 = &(*rp1->ranges)[rp1->idx];
798 r2 = &(*rp2->ranges)[rp2->idx];
799
800 /* Get the unavailable windows intersected by the incoming
801 ranges. The first and last ranges that overlap the argument
802 range may be wider than said incoming arguments ranges. */
803 l1 = std::max (offset1, r1->offset);
804 h1 = std::min (offset1 + length, r1->offset + r1->length);
805
806 l2 = std::max (offset2, r2->offset);
807 h2 = std::min (offset2 + length, offset2 + r2->length);
808
809 /* Make them relative to the respective start offsets, so we can
810 compare them for equality. */
811 l1 -= offset1;
812 h1 -= offset1;
813
814 l2 -= offset2;
815 h2 -= offset2;
816
817 /* Different ranges, no match. */
818 if (l1 != l2 || h1 != h2)
819 return 0;
820
821 *h = h1;
822 *l = l1;
823 return 1;
824 }
825 }
826
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static bool
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->unavailable;
  rp2[0].ranges = &val2->unavailable;
  rp1[1].ranges = &val1->optimized_out;
  rp2[1].ranges = &val2->optimized_out;

  /* Walk the compared window in chunks: each iteration validates that
     the invalid ranges agree, then compares the valid contents up to
     the first invalid range, then skips past it.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
				   val2->contents.get (), offset2, l) != 0)
	return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
895
896 bool
897 value_contents_eq (const struct value *val1, LONGEST offset1,
898 const struct value *val2, LONGEST offset2,
899 LONGEST length)
900 {
901 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
902 val2, offset2 * TARGET_CHAR_BIT,
903 length * TARGET_CHAR_BIT);
904 }
905
906
/* The value-history records all the values printed by print commands
   during this session.  Each entry holds a strong reference, so the
   values stay alive for later "$N" re-use.  */

static std::vector<value_ref_ptr> value_history;


/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;
918
919 /* Allocate a lazy value for type TYPE. Its actual content is
920 "lazily" allocated too: the content field of the return value is
921 NULL; it will be allocated when it is fetched from the target. */
922
923 struct value *
924 allocate_value_lazy (struct type *type)
925 {
926 struct value *val;
927
928 /* Call check_typedef on our type to make sure that, if TYPE
929 is a TYPE_CODE_TYPEDEF, its length is set to the length
930 of the target type instead of zero. However, we do not
931 replace the typedef type by the target type, because we want
932 to keep the typedef in order to be able to set the VAL's type
933 description correctly. */
934 check_typedef (type);
935
936 val = new struct value (type);
937
938 /* Values start out on the all_values chain. */
939 all_values.emplace_back (val);
940
941 return val;
942 }
943
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see show_max_value_size).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
961
962 /* Implement the "set max-value-size" command. */
963
964 static void
965 set_max_value_size (const char *args, int from_tty,
966 struct cmd_list_element *c)
967 {
968 gdb_assert (max_value_size == -1 || max_value_size >= 0);
969
970 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
971 {
972 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
973 error (_("max-value-size set too low, increasing to %d bytes"),
974 max_value_size);
975 }
976 }
977
978 /* Implement the "show max-value-size" command. */
979
980 static void
981 show_max_value_size (struct ui_file *file, int from_tty,
982 struct cmd_list_element *c, const char *value)
983 {
984 if (max_value_size == -1)
985 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
986 else
987 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
988 max_value_size);
989 }
990
991 /* Called before we attempt to allocate or reallocate a buffer for the
992 contents of a value. TYPE is the type of the value for which we are
993 allocating the buffer. If the buffer is too large (based on the user
994 controllable setting) then throw an error. If this function returns
995 then we should attempt to allocate the buffer. */
996
997 static void
998 check_type_length_before_alloc (const struct type *type)
999 {
1000 unsigned int length = TYPE_LENGTH (type);
1001
1002 if (max_value_size > -1 && length > max_value_size)
1003 {
1004 if (TYPE_NAME (type) != NULL)
1005 error (_("value of type `%s' requires %u bytes, which is more "
1006 "than max-value-size"), TYPE_NAME (type), length);
1007 else
1008 error (_("value requires %u bytes, which is more than "
1009 "max-value-size"), length);
1010 }
1011 }
1012
1013 /* Allocate the contents of VAL if it has not been allocated yet. */
1014
1015 static void
1016 allocate_value_contents (struct value *val)
1017 {
1018 if (!val->contents)
1019 {
1020 check_type_length_before_alloc (val->enclosing_type);
1021 val->contents.reset
1022 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1023 }
1024 }
1025
1026 /* Allocate a value and its contents for type TYPE. */
1027
1028 struct value *
1029 allocate_value (struct type *type)
1030 {
1031 struct value *val = allocate_value_lazy (type);
1032
1033 allocate_value_contents (val);
1034 val->lazy = 0;
1035 return val;
1036 }
1037
1038 /* Allocate a value that has the correct length
1039 for COUNT repetitions of type TYPE. */
1040
1041 struct value *
1042 allocate_repeat_value (struct type *type, int count)
1043 {
1044 int low_bound = current_language->string_lower_bound; /* ??? */
1045 /* FIXME-type-allocation: need a way to free this type when we are
1046 done with it. */
1047 struct type *array_type
1048 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1049
1050 return allocate_value (array_type);
1051 }
1052
/* Allocate a lazy lval_computed value of type TYPE whose contents are
   produced on demand through the FUNCS callbacks.  CLOSURE is stored
   uninterpreted and handed back to the callbacks.  */

struct value *
allocate_computed_value (struct type *type,
                         const struct lval_funcs *funcs,
                         void *closure)
{
  struct value *v = allocate_value_lazy (type);

  VALUE_LVAL (v) = lval_computed;
  v->location.computed.funcs = funcs;
  v->location.computed.closure = closure;

  return v;
}
1066
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  The
   whole of the value is marked optimized out, and it is made
   non-lazy so no fetch is ever attempted.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  /* Mark every bit of the value as optimized out.  */
  mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
  set_value_lazy (retval, 0);
  return retval;
}
1078
/* Accessor methods.  */

/* Return the declared type of VALUE.  */
struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's declared type.  Deprecated: prefer creating a
   value with the right type up front.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Byte offset of VALUE within its parent / location.  */
LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}

/* Bit position of a bitfield value within value_offset.  */
LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}

/* Size in bits of a bitfield value; 0 means "not a bitfield".  */
LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}

/* Return the parent value a bitfield/component value was carved out
   of, or NULL.  The reference stays owned by VALUE.  */
struct value *
value_parent (const struct value *value)
{
  return value->parent.get ();
}

/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  /* Takes a new strong reference to PARENT; any previous parent
     reference is dropped by the assignment.  */
  value->parent = value_ref_ptr::new_reference (parent);
}
1138
/* Return a writable pointer to VALUE's contents, positioned at the
   embedded offset (i.e. at the start of the declared type within the
   enclosing type).  Allocates the buffer on first use.  */
gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  /* embedded_offset is in addressable units; scale to host bytes.  */
  return value->contents.get () + value->embedded_offset * unit_size;
}

/* Like value_contents_raw but for the whole enclosing type,
   starting at offset zero.  */
gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents.get ();
}

/* Return the enclosing (run-time/most-derived) type of VALUE.  */
struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1161
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default to the declared type; only refined below when the user
     has "set print object on".  */
  result = value_type (value);
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT
	  && !value_optimized_out (value))
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
	  /* Non-pointer case: the enclosing type is already the most
	     derived type we know about.  */
          if (real_type_found)
            *real_type_found = 1;
          result = value_enclosing_type (value);
        }
    }

  return result;
}
1205
/* Throw the generic "optimized out" error.  */
void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Throw if any part of VALUE is marked optimized out.  Registers get
   a more specific message than other lvals.  */
static void
require_not_optimized_out (const struct value *value)
{
  if (!value->optimized_out.empty ())
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */
static void
require_available (const struct value *value)
{
  if (!value->unavailable.empty ())
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1230
/* Return VALUE's full contents for printing purposes: fetches lazy
   values but does NOT complain about optimized-out or unavailable
   ranges (callers handle those per-byte while printing).  */
const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents.get ();
}

/* Const variant of the above; VALUE must already be non-lazy since we
   cannot fetch through a const pointer.  */
const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents.get ();
}

/* Return VALUE's full contents, throwing if any part is optimized out
   or unavailable.  */
const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1254
1255 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1256 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1257
1258 static void
1259 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1260 const std::vector<range> &src_range, int src_bit_offset,
1261 int bit_length)
1262 {
1263 for (const range &r : src_range)
1264 {
1265 ULONGEST h, l;
1266
1267 l = std::max (r.offset, (LONGEST) src_bit_offset);
1268 h = std::min (r.offset + r.length,
1269 (LONGEST) src_bit_offset + bit_length);
1270
1271 if (l < h)
1272 insert_into_bit_range_vector (dst_range,
1273 dst_bit_offset + (l - src_bit_offset),
1274 h - l);
1275 }
1276 }
1277
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   "unavailable" and the "optimized out" range sets are carried
   over.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1293
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  Offsets are converted from
     addressable units to bits before adjusting the range sets.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1340
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, LONGEST dst_offset,
		     struct value *src, LONGEST src_offset, LONGEST length)
{
  /* Un-lazy SRC first; value_contents_copy_raw asserts both values
     are non-lazy.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1360
/* Non-zero if VALUE's contents have not been fetched yet.  */
int
value_lazy (const struct value *value)
{
  return value->lazy;
}

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Non-zero if VALUE is known to live on the (inferior's) stack.  */
int
value_stack (const struct value *value)
{
  return value->stack;
}

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1384
/* Return a read-only view of VALUE's contents (declared type only),
   fetching if lazy, and throwing if any part is optimized out or
   unavailable.  */
const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Writable variant: fetches if lazy but performs no availability or
   optimized-out checks.  */
gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1401
/* Return non-zero if any part of VALUE is optimized out, fetching the
   value first if needed, since laziness means we may not yet know.  */
int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (value->optimized_out.empty () && value->lazy)
    {
      TRY
	{
	  value_fetch_lazy (value);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  /* Fall back to checking value->optimized_out.  */
	}
      END_CATCH
    }

  return !value->optimized_out.empty ();
}
1422
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  Byte-granularity convenience wrapper
   around the bit-granularity version below.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}

/* Return non-zero if the [OFFSET, OFFSET+LENGTH) bit range of VALUE
   is a synthetic pointer, as reported by a computed lval's
   check_synthetic_pointer callback; 0 for all other lval kinds.  */
int
value_bits_synthetic_pointer (const struct value *value,
			      LONGEST offset, LONGEST length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1454
/* Offset of the declared type within the enclosing type's contents,
   in addressable units.  */
LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}

/* For pointer values: offset the pointed-to object's declared type
   has within its enclosing type.  */
LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}
1478
/* Return the callback table of a computed lvalue; V must be
   lval_computed.  */
const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the opaque closure of a computed lvalue; V must be
   lval_computed.  */
void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Writable access to VALUE's lval kind (deprecated; used by the
   VALUE_LVAL macro).  */
enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Read-only access to VALUE's lval kind.  */
enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1506
/* Return the (possibly offset-adjusted) memory address of VALUE, or 0
   if VALUE is not in memory.  Component values recurse through the
   parent; types with a constant data-location property use that.  */
CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent.get ()) + value->offset;
  if (NULL != TYPE_DATA_LOCATION (value_type (value)))
    {
      /* Only constant data locations are expected here; dynamic ones
	 should have been resolved already.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
      return TYPE_DATA_LOCATION_ADDR (value_type (value));
    }

  return value->location.address + value->offset;
}

/* Like value_address but without the offset/parent/data-location
   adjustments: the raw stored address.  */
CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  return value->location.address;
}

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}
1537
/* Writable access to the internalvar location field (deprecated; used
   by the VALUE_INTERNALVAR macro).  */
struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

/* Writable access to a register value's next-frame id; VALUE must be
   lval_register.  */
struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}

/* Writable access to a register value's register number; VALUE must
   be lval_register.  */
int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}

/* Non-zero if assignments through VALUE are allowed (history values
   are made non-modifiable, for instance).  */
int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1563 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  Returns
   nullptr when the chain is empty (an empty-chain mark).  */
struct value *
value_mark (void)
{
  if (all_values.empty ())
    return nullptr;
  return all_values.back ().get ();
}

/* See value.h.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1582
/* Release a reference to VAL, which was acquired with value_incref.
   This function is also called to deallocate values from the value
   chain.  A nullptr VAL is a no-op; the value is destroyed when the
   last reference goes away.  */

void
value_decref (struct value *val)
{
  if (val != nullptr)
    {
      gdb_assert (val->reference_count > 0);
      val->reference_count--;
      if (val->reference_count == 0)
	delete val;
    }
}
1598
1599 /* Free all values allocated since MARK was obtained by value_mark
1600 (except for those released). */
1601 void
1602 value_free_to_mark (const struct value *mark)
1603 {
1604 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1605 if (iter == all_values.end ())
1606 all_values.clear ();
1607 else
1608 all_values.erase (iter + 1, all_values.end ());
1609 }
1610
/* Remove VAL from the chain all_values
   so it will not be freed automatically.  Always returns an owned
   reference to VAL (or an empty pointer for nullptr).  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  /* Search from the back: the value being released is usually the
     most recently allocated one.  */
  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
	{
	  value_ref_ptr result = *iter;
	  /* Convert the reverse iterator to the forward iterator that
	     designates the same element before erasing.  */
	  all_values.erase (iter.base () - 1);
	  return result;
	}
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}
1636
/* See value.h.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      /* Move ownership of everything after MARK into RESULT, then
	 drop the now-empty slots from the chain.  */
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* Callers expect oldest-first order.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
1655
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  Laziness, location,
   offsets, availability/optimized-out metadata and the parent link
   are all carried over; computed lvalues get their closure copied
   via the copy_closure callback.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  /* Only non-lazy values have contents to duplicate.  */
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  val->unavailable = arg->unavailable;
  val->optimized_out = arg->optimized_out;
  val->parent = arg->parent;
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give the computed lvalue a chance to deep-copy its closure.  */
      if (funcs->copy_closure)
        val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1698
/* Return a "const" and/or "volatile" qualified version of the value V.
   If CNST is true, then the returned value will be qualified with
   "const".
   if VOLTL is true, then the returned value will be qualified with
   "volatile".  V itself is not modified; the result is a copy with
   both the declared and the enclosing type re-qualified.  */

struct value *
make_cv_value (int cnst, int voltl, struct value *v)
{
  struct type *val_type = value_type (v);
  struct type *enclosing_type = value_enclosing_type (v);
  struct value *cv_val = value_copy (v);

  deprecated_set_value_type (cv_val,
			     make_cv_type (cnst, voltl, val_type, NULL));
  set_value_enclosing_type (cv_val,
			    make_cv_type (cnst, voltl, enclosing_type, NULL));

  return cv_val;
}
1719
/* Return a version of ARG that is non-lvalue.  If ARG is already
   not_lval it is returned unchanged (no copy); otherwise a fresh
   not_lval value with the same contents is built.  */

struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      /* value_contents_all fetches ARG if lazy and throws if parts
	 are unavailable/optimized out.  */
      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1739
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.
   V must currently be not_lval; after this call it is an lval_memory
   value located at ADDR.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1751
/* Make COMPONENT's location the same as WHOLE's, as appropriate for a
   sub-value (member, array element, ...) of WHOLE.  Internal
   variables produce lval_internalvar_component children; computed
   lvalues get their closure copied.  */
void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  /* xcallable values have no meaningful component location.  */
  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      if (funcs->copy_closure)
        component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If type has a dynamic resolved location property
     update it's value address.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
}
1781
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry (indices are
   1-based, so the size after the push is the new index).  */

int
record_latest_value (struct value *val)
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  value_history.push_back (release_value (val));

  return value_history.size ();
}
1805
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 counts backwards from the latest entry ($$0 is the last,
   $$1 the one before, etc.).  Throws for out-of-range requests.  */

struct value *
access_value_history (int num)
{
  int absnum = num;

  /* Translate a relative ($$n) index into an absolute one.  */
  if (absnum <= 0)
    absnum += value_history.size ();

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history.size ())
    error (_("History has not yet reached $%d."), absnum);

  /* History indices are 1-based; the vector is 0-based.  */
  absnum--;

  return value_copy (value_history[absnum].get ());
}
1832
/* Implementation of the "show values" command: print ten entries of
   the value history around NUM_EXP, or continue from the previous
   position when invoked as "show values +" (or repeated).  */
static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Persisted across calls so "show values +" continues where the
     previous invocation stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
         "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
1876 \f
/* The different kinds of content an internal variable ($foo) can
   hold; selects the active member of union internalvar_data.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

/* Storage for an internal variable's content; which member is valid
   is determined by the variable's internalvar_kind.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
1936
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Next variable on the singly-linked chain below.  */
  struct internalvar *next;
  /* Name without the '$' prefix; owned by this struct.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the chain of all defined internal variables.  */
static struct internalvar *internalvars;
1957
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.
   Implements the "init-if-undefined" command; ARGS must be an
   assignment expression whose lvalue is a convenience variable.  */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar* intvar;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.
     In the case of an assign the lvalue will be in elts[1] and elts[2].  */
  if (expr->elts[1].opcode != OP_INTERNALVAR)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));
  intvar = expr->elts[2].internalvar;

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
1986
1987
1988 /* Look up an internal variable with name NAME. NAME should not
1989 normally include a dollar sign.
1990
1991 If the specified internal variable does not exist,
1992 the return value is NULL. */
1993
1994 struct internalvar *
1995 lookup_only_internalvar (const char *name)
1996 {
1997 struct internalvar *var;
1998
1999 for (var = internalvars; var; var = var->next)
2000 if (strcmp (var->name, name) == 0)
2001 return var;
2002
2003 return NULL;
2004 }
2005
/* Complete NAME by comparing it to the names of internal
   variables.  Every internal variable whose name starts with NAME is
   added to TRACKER.  */

void
complete_internalvar (completion_tracker &tracker, const char *name)
{
  struct internalvar *var;
  int len;

  len = strlen (name);

  for (var = internalvars; var; var = var->next)
    if (strncmp (var->name, name, len) == 0)
      {
	/* The tracker takes ownership of the copied name.  */
	gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));

	tracker.add_completion (std::move (copy));
      }
}
2025
/* Create an internal variable with name NAME and with a void value.
   NAME should not normally include a dollar sign.  The new variable
   is pushed onto the front of the internalvars chain.  */

struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var = XNEW (struct internalvar);

  /* concat with a single argument is an xstrdup that allocates with
     xmalloc.  */
  var->name = concat (name, (char *)NULL);
  var->kind = INTERNALVAR_VOID;
  var->next = internalvars;
  internalvars = var;
  return var;
}
2040
/* Create an internal variable with name NAME and register FUN as the
   function that value_of_internalvar uses to create a value whenever
   this variable is referenced.  NAME should not normally include a
   dollar sign.  DATA is passed uninterpreted to FUN when it is
   called.  CLEANUP, if not NULL, is called when the internal variable
   is destroyed.  It is passed DATA as its only argument.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2060
/* See documentation in value.h.  */

int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  /* Only MAKE_VALUE variables that supply a compile_to_ax callback
     can be compiled to an agent expression.  */
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2076
2077 /* Look up an internal variable with name NAME. NAME should not
2078 normally include a dollar sign.
2079
2080 If the specified internal variable does not exist,
2081 one is created, with a void value. */
2082
2083 struct internalvar *
2084 lookup_internalvar (const char *name)
2085 {
2086 struct internalvar *var;
2087
2088 var = lookup_only_internalvar (name);
2089 if (var)
2090 return var;
2091
2092 return create_internalvar (name);
2093 }
2094
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.
   Trace state variables of the same name shadow internal variables.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a value appropriate to the kind of content the variable
     currently holds.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2183
2184 int
2185 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2186 {
2187 if (var->kind == INTERNALVAR_INTEGER)
2188 {
2189 *result = var->u.integer.val;
2190 return 1;
2191 }
2192
2193 if (var->kind == INTERNALVAR_VALUE)
2194 {
2195 struct type *type = check_typedef (value_type (var->u.value));
2196
2197 if (TYPE_CODE (type) == TYPE_CODE_INT)
2198 {
2199 *result = value_as_long (var->u.value);
2200 return 1;
2201 }
2202 }
2203
2204 return 0;
2205 }
2206
2207 static int
2208 get_internalvar_function (struct internalvar *var,
2209 struct internal_function **result)
2210 {
2211 switch (var->kind)
2212 {
2213 case INTERNALVAR_FUNCTION:
2214 *result = var->u.fn.function;
2215 return 1;
2216
2217 default:
2218 return 0;
2219 }
2220 }
2221
2222 void
2223 set_internalvar_component (struct internalvar *var,
2224 LONGEST offset, LONGEST bitpos,
2225 LONGEST bitsize, struct value *newval)
2226 {
2227 gdb_byte *addr;
2228 struct gdbarch *arch;
2229 int unit_size;
2230
2231 switch (var->kind)
2232 {
2233 case INTERNALVAR_VALUE:
2234 addr = value_contents_writeable (var->u.value);
2235 arch = get_value_arch (var->u.value);
2236 unit_size = gdbarch_addressable_memory_unit_size (arch);
2237
2238 if (bitsize)
2239 modify_field (value_type (var->u.value), addr + offset,
2240 value_as_long (newval), bitpos, bitsize);
2241 else
2242 memcpy (addr + offset * unit_size, value_contents (newval),
2243 TYPE_LENGTH (value_type (newval)));
2244 break;
2245
2246 default:
2247 /* We can never get a component of any other kind. */
2248 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2249 }
2250 }
2251
/* Set internal variable VAR to a copy of the value VAL.  Fails if VAR
   is a canonical convenience function.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2312
2313 void
2314 set_internalvar_integer (struct internalvar *var, LONGEST l)
2315 {
2316 /* Clean up old contents. */
2317 clear_internalvar (var);
2318
2319 var->kind = INTERNALVAR_INTEGER;
2320 var->u.integer.type = NULL;
2321 var->u.integer.val = l;
2322 }
2323
2324 void
2325 set_internalvar_string (struct internalvar *var, const char *string)
2326 {
2327 /* Clean up old contents. */
2328 clear_internalvar (var);
2329
2330 var->kind = INTERNALVAR_STRING;
2331 var->u.string = xstrdup (string);
2332 }
2333
2334 static void
2335 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2336 {
2337 /* Clean up old contents. */
2338 clear_internalvar (var);
2339
2340 var->kind = INTERNALVAR_FUNCTION;
2341 var->u.fn.function = f;
2342 var->u.fn.canonical = 1;
2343 /* Variables installed here are always the canonical version. */
2344 }
2345
2346 void
2347 clear_internalvar (struct internalvar *var)
2348 {
2349 /* Clean up old contents. */
2350 switch (var->kind)
2351 {
2352 case INTERNALVAR_VALUE:
2353 value_decref (var->u.value);
2354 break;
2355
2356 case INTERNALVAR_STRING:
2357 xfree (var->u.string);
2358 break;
2359
2360 case INTERNALVAR_MAKE_VALUE:
2361 if (var->u.make_value.functions->destroy != NULL)
2362 var->u.make_value.functions->destroy (var->u.make_value.data);
2363 break;
2364
2365 default:
2366 break;
2367 }
2368
2369 /* Reset to void kind. */
2370 var->kind = INTERNALVAR_VOID;
2371 }
2372
2373 char *
2374 internalvar_name (const struct internalvar *var)
2375 {
2376 return var->name;
2377 }
2378
2379 static struct internal_function *
2380 create_internal_function (const char *name,
2381 internal_function_fn handler, void *cookie)
2382 {
2383 struct internal_function *ifn = XNEW (struct internal_function);
2384
2385 ifn->name = xstrdup (name);
2386 ifn->handler = handler;
2387 ifn->cookie = cookie;
2388 return ifn;
2389 }
2390
2391 char *
2392 value_internal_function_name (struct value *val)
2393 {
2394 struct internal_function *ifn;
2395 int result;
2396
2397 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2398 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2399 gdb_assert (result);
2400
2401 return ifn->name;
2402 }
2403
2404 struct value *
2405 call_internal_function (struct gdbarch *gdbarch,
2406 const struct language_defn *language,
2407 struct value *func, int argc, struct value **argv)
2408 {
2409 struct internal_function *ifn;
2410 int result;
2411
2412 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2413 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2414 gdb_assert (result);
2415
2416 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2417 }
2418
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */
static void
function_command (const char *command, int from_tty)
{
  /* Do nothing; COMMAND and FROM_TTY are intentionally ignored.  */
}
2428
/* Clean up if an internal function's command is destroyed.  The name
   and doc strings were handed to add_cmd as heap-allocated copies by
   add_internal_function, so free them here.  */
static void
function_destroyer (struct cmd_list_element *self, void *ignore)
{
  xfree ((char *) self->name);
  xfree ((char *) self->doc);
}
2436
2437 /* Add a new internal function. NAME is the name of the function; DOC
2438 is a documentation string describing the function. HANDLER is
2439 called when the function is invoked. COOKIE is an arbitrary
2440 pointer which is passed to HANDLER and is intended for "user
2441 data". */
2442 void
2443 add_internal_function (const char *name, const char *doc,
2444 internal_function_fn handler, void *cookie)
2445 {
2446 struct cmd_list_element *cmd;
2447 struct internal_function *ifn;
2448 struct internalvar *var = lookup_internalvar (name);
2449
2450 ifn = create_internal_function (name, handler, cookie);
2451 set_internalvar_function (var, ifn);
2452
2453 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2454 &functionlist);
2455 cmd->destroyer = function_destroyer;
2456 }
2457
2458 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2459 prevent cycles / duplicates. */
2460
2461 void
2462 preserve_one_value (struct value *value, struct objfile *objfile,
2463 htab_t copied_types)
2464 {
2465 if (TYPE_OBJFILE (value->type) == objfile)
2466 value->type = copy_type_recursive (objfile, value->type, copied_types);
2467
2468 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2469 value->enclosing_type = copy_type_recursive (objfile,
2470 value->enclosing_type,
2471 copied_types);
2472 }
2473
2474 /* Likewise for internal variable VAR. */
2475
2476 static void
2477 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2478 htab_t copied_types)
2479 {
2480 switch (var->kind)
2481 {
2482 case INTERNALVAR_INTEGER:
2483 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2484 var->u.integer.type
2485 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2486 break;
2487
2488 case INTERNALVAR_VALUE:
2489 preserve_one_value (var->u.value, objfile, copied_types);
2490 break;
2491 }
2492 }
2493
2494 /* Update the internal variables and value history when OBJFILE is
2495 discarded; we must copy the types out of the objfile. New global types
2496 will be created for every convenience variable which currently points to
2497 this objfile's types, and the convenience variables will be adjusted to
2498 use the new global types. */
2499
2500 void
2501 preserve_values (struct objfile *objfile)
2502 {
2503 htab_t copied_types;
2504 struct internalvar *var;
2505
2506 /* Create the hash table. We allocate on the objfile's obstack, since
2507 it is soon to be deleted. */
2508 copied_types = create_copied_types_hash (objfile);
2509
2510 for (const value_ref_ptr &item : value_history)
2511 preserve_one_value (item.get (), objfile, copied_types);
2512
2513 for (var = internalvars; var; var = var->next)
2514 preserve_one_internalvar (var, objfile, copied_types);
2515
2516 preserve_ext_lang_values (objfile, copied_types);
2517
2518 htab_delete (copied_types);
2519 }
2520
/* Implementation of "show convenience": print each internal variable
   and its current value, or a help message when none are defined.  */

static void
show_convenience (const char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      /* Remember that at least one variable was printed, so the help
	 text below is skipped.  */
      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Computing the value can throw; report the error inline and
	 keep listing the remaining variables.  */
      TRY
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      CATCH (ex, RETURN_MASK_ERROR)
	{
	  fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
	}
      END_CATCH

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2567 \f
2568
2569 /* See value.h. */
2570
2571 struct value *
2572 value_from_xmethod (xmethod_worker_up &&worker)
2573 {
2574 struct value *v;
2575
2576 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2577 v->lval = lval_xcallable;
2578 v->location.xm_worker = worker.release ();
2579 v->modifiable = 0;
2580
2581 return v;
2582 }
2583
2584 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2585
2586 struct type *
2587 result_type_of_xmethod (struct value *method, int argc, struct value **argv)
2588 {
2589 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2590 && method->lval == lval_xcallable && argc > 0);
2591
2592 return method->location.xm_worker->get_result_type
2593 (argv[0], argv + 1, argc - 1);
2594 }
2595
2596 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2597
2598 struct value *
2599 call_xmethod (struct value *method, int argc, struct value **argv)
2600 {
2601 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2602 && method->lval == lval_xcallable && argc > 0);
2603
2604 return method->location.xm_worker->invoke (argv[0], argv + 1, argc - 1);
2605 }
2606 \f
2607 /* Extract a value as a C number (either long or double).
2608 Knows how to convert fixed values to double, or
2609 floating values to long.
2610 Does not deallocate the value. */
2611
2612 LONGEST
2613 value_as_long (struct value *val)
2614 {
2615 /* This coerces arrays and functions, which is necessary (e.g.
2616 in disassemble_command). It also dereferences references, which
2617 I suspect is the most logical thing to do. */
2618 val = coerce_array (val);
2619 return unpack_long (value_type (val), value_contents (val));
2620 }
2621
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (value_type (val))
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise fall back to the plain integer interpretation of the
     contents.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2728 \f
2729 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2730 as a long, or as a double, assuming the raw data is described
2731 by type TYPE. Knows how to convert different sizes of values
2732 and can convert between fixed and floating point. We don't assume
2733 any alignment for the raw data. Return value is in host byte order.
2734
2735 If you want functions and arrays to be coerced to pointers, and
2736 references to be dereferenced, call value_as_long() instead.
2737
2738 C++: It is assumed that the front-end has taken care of
2739 all matters concerning pointers to members. A pointer
2740 to member which reaches here is considered to be equivalent
2741 to an INT (or some size). After all, it is only an offset. */
2742
2743 LONGEST
2744 unpack_long (struct type *type, const gdb_byte *valaddr)
2745 {
2746 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2747 enum type_code code = TYPE_CODE (type);
2748 int len = TYPE_LENGTH (type);
2749 int nosign = TYPE_UNSIGNED (type);
2750
2751 switch (code)
2752 {
2753 case TYPE_CODE_TYPEDEF:
2754 return unpack_long (check_typedef (type), valaddr);
2755 case TYPE_CODE_ENUM:
2756 case TYPE_CODE_FLAGS:
2757 case TYPE_CODE_BOOL:
2758 case TYPE_CODE_INT:
2759 case TYPE_CODE_CHAR:
2760 case TYPE_CODE_RANGE:
2761 case TYPE_CODE_MEMBERPTR:
2762 if (nosign)
2763 return extract_unsigned_integer (valaddr, len, byte_order);
2764 else
2765 return extract_signed_integer (valaddr, len, byte_order);
2766
2767 case TYPE_CODE_FLT:
2768 case TYPE_CODE_DECFLOAT:
2769 return target_float_to_longest (valaddr, type);
2770
2771 case TYPE_CODE_PTR:
2772 case TYPE_CODE_REF:
2773 case TYPE_CODE_RVALUE_REF:
2774 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2775 whether we want this to be true eventually. */
2776 return extract_typed_address (valaddr, type);
2777
2778 default:
2779 error (_("Value can't be converted to integer."));
2780 }
2781 return 0; /* Placate lint. */
2782 }
2783
2784 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2785 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2786 We don't assume any alignment for the raw data. Return value is in
2787 host byte order.
2788
2789 If you want functions and arrays to be coerced to pointers, and
2790 references to be dereferenced, call value_as_address() instead.
2791
2792 C++: It is assumed that the front-end has taken care of
2793 all matters concerning pointers to members. A pointer
2794 to member which reaches here is considered to be equivalent
2795 to an INT (or some size). After all, it is only an offset. */
2796
2797 CORE_ADDR
2798 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2799 {
2800 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2801 whether we want this to be true eventually. */
2802 return unpack_long (type, valaddr);
2803 }
2804
2805 bool
2806 is_floating_value (struct value *val)
2807 {
2808 struct type *type = check_typedef (value_type (val));
2809
2810 if (is_floating_type (type))
2811 {
2812 if (!target_float_is_valid (value_contents (val), type))
2813 error (_("Invalid floating value found in program."));
2814 return true;
2815 }
2816
2817 return false;
2818 }
2819
2820 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The field's address is recorded directly; read it lazily from
	 there.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only the physical (mangled) name is known; look the symbol up
	 to find the address.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);
	  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);

	  /* No symbol at all: the member's storage is not present, so
	     represent it as optimized out.  */
	  if (!msym.minsym)
	    retval = allocate_optimized_out_value (field_type);
	  else
	    retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2864
2865 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2866 You have to be careful here, since the size of the data area for the value
2867 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2868 than the old enclosing type, you have to allocate more space for the
2869 data. */
2870
2871 void
2872 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2873 {
2874 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2875 {
2876 check_type_length_before_alloc (new_encl_type);
2877 val->contents
2878 .reset ((gdb_byte *) xrealloc (val->contents.release (),
2879 TYPE_LENGTH (new_encl_type)));
2880 }
2881
2882 val->enclosing_type = new_encl_type;
2883 }
2884
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Keep the whole containing unit readable in one go when the
	 bitfield fits inside it and a LONGEST can hold it.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
         until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
	         / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
3009
3010 /* Given a value ARG1 of a struct or union type,
3011 extract and return the value of one of its (non-static) fields.
3012 FIELDNO says which field. */
3013
3014 struct value *
3015 value_field (struct value *arg1, int fieldno)
3016 {
3017 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3018 }
3019
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Prefer a full debug symbol for the method; fall back to a minimal
     symbol if there is none.  If neither exists, give up.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym != NULL)
    {
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      gdb_assert (sym == NULL);
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  VALUE_LVAL (v) = lval_memory;
  if (sym)
    {
      /* Full symbol: the function's start address comes from its
	 block.  */
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), target_stack));
    }

  if (arg1p)
    {
      /* Cast the object to the method's declaring type if necessary,
	 via a pointer round-trip so the lvalue is preserved.  */
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset;  */
    }

  return v;
}
3081
3082 \f
3083
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and store the result in *RESULT.
   The bitfield starts at BITPOS bits and contains BITSIZE bits; if
   BITSIZE is zero, then the length is taken from FIELD_TYPE.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* BITSIZE of zero means "use FIELD_TYPE's full width".  */
      bytes_read = TYPE_LENGTH (field_type);
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* The field's sign bit is set: OR in all the bits above the
	     field to sign-extend the result.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3153
3154 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3155 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3156 ORIGINAL_VALUE, which must not be NULL. See
3157 unpack_value_bits_as_long for more details. */
3158
3159 int
3160 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3161 LONGEST embedded_offset, int fieldno,
3162 const struct value *val, LONGEST *result)
3163 {
3164 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3165 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3166 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3167 int bit_offset;
3168
3169 gdb_assert (val != NULL);
3170
3171 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3172 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3173 || !value_bits_available (val, bit_offset, bitsize))
3174 return 0;
3175
3176 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3177 bitpos, bitsize);
3178 return 1;
3179 }
3180
3181 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3182 object at VALADDR. See unpack_bits_as_long for more details. */
3183
3184 LONGEST
3185 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3186 {
3187 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3188 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3189 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3190
3191 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3192 }
3193
3194 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3195 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3196 the contents in DEST_VAL, zero or sign extending if the type of
3197 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3198 VAL. If the VAL's contents required to extract the bitfield from
3199 are unavailable/optimized out, DEST_VAL is correspondingly
3200 marked unavailable/optimized out. */
3201
3202 void
3203 unpack_value_bitfield (struct value *dest_val,
3204 LONGEST bitpos, LONGEST bitsize,
3205 const gdb_byte *valaddr, LONGEST embedded_offset,
3206 const struct value *val)
3207 {
3208 enum bfd_endian byte_order;
3209 int src_bit_offset;
3210 int dst_bit_offset;
3211 struct type *field_type = value_type (dest_val);
3212
3213 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3214
3215 /* First, unpack and sign extend the bitfield as if it was wholly
3216 valid. Optimized out/unavailable bits are read as zero, but
3217 that's OK, as they'll end up marked below. If the VAL is
3218 wholly-invalid we may have skipped allocating its contents,
3219 though. See allocate_optimized_out_value. */
3220 if (valaddr != NULL)
3221 {
3222 LONGEST num;
3223
3224 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3225 bitpos, bitsize);
3226 store_signed_integer (value_contents_raw (dest_val),
3227 TYPE_LENGTH (field_type), byte_order, num);
3228 }
3229
3230 /* Now copy the optimized out / unavailability ranges to the right
3231 bits. */
3232 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3233 if (byte_order == BFD_ENDIAN_BIG)
3234 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3235 else
3236 dst_bit_offset = 0;
3237 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3238 val, src_bit_offset, bitsize);
3239 }
3240
3241 /* Return a new value with type TYPE, which is FIELDNO field of the
3242 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3243 of VAL. If the VAL's contents required to extract the bitfield
3244 from are unavailable/optimized out, the new value is
3245 correspondingly marked unavailable/optimized out. */
3246
3247 struct value *
3248 value_field_bitfield (struct type *type, int fieldno,
3249 const gdb_byte *valaddr,
3250 LONGEST embedded_offset, const struct value *val)
3251 {
3252 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3253 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3254 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3255
3256 unpack_value_bitfield (res_val, bitpos, bitsize,
3257 valaddr, embedded_offset, val);
3258
3259 return res_val;
3260 }
3261
3262 /* Modify the value of a bitfield. ADDR points to a block of memory in
3263 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3264 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3265 indicate which bits (in target bit order) comprise the bitfield.
3266 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3267 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3268
void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* MASK has exactly BITSIZE low bits set.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Read-modify-write: clear the field's bits, then OR in FIELDVAL.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3313 \f
3314 /* Pack NUM into BUF using a target format of TYPE. */
3315
void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  LONGEST len;

  type = check_typedef (type);
  len = TYPE_LENGTH (type);

  switch (TYPE_CODE (type))
    {
    /* Integer-like types: store NUM as a signed integer of the
       type's length.  */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      store_signed_integer (buf, len, byte_order, num);
      break;

    /* Pointer-like types: store NUM as a target address, which may
       involve address-class conversions.  */
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    /* Floating-point types: convert NUM to the target float format.  */
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     TYPE_CODE (type));
    }
}
3353


/* Pack unsigned NUM into BUF using a target format of TYPE.  */
3356
static void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = TYPE_LENGTH (type);
  byte_order = gdbarch_byte_order (get_type_arch (type));

  switch (TYPE_CODE (type))
    {
    /* Integer-like types: store NUM as an unsigned integer of the
       type's length.  */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    /* Pointer-like types: store NUM as a target address.  */
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    /* Floating-point types: convert NUM to the target float format.  */
    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     TYPE_CODE (type));
    }
}
3396
3397
3398 /* Convert C numbers into newly allocated values. */
3399
3400 struct value *
3401 value_from_longest (struct type *type, LONGEST num)
3402 {
3403 struct value *val = allocate_value (type);
3404
3405 pack_long (value_contents_raw (val), type, num);
3406 return val;
3407 }
3408
3409
3410 /* Convert C unsigned numbers into newly allocated values. */
3411
3412 struct value *
3413 value_from_ulongest (struct type *type, ULONGEST num)
3414 {
3415 struct value *val = allocate_value (type);
3416
3417 pack_unsigned_long (value_contents_raw (val), type, num);
3418
3419 return val;
3420 }
3421
3422
3423 /* Create a value representing a pointer of type TYPE to the address
3424 ADDR. */
3425
3426 struct value *
3427 value_from_pointer (struct type *type, CORE_ADDR addr)
3428 {
3429 struct value *val = allocate_value (type);
3430
3431 store_typed_address (value_contents_raw (val),
3432 check_typedef (type), addr);
3433 return val;
3434 }
3435
3436
3437 /* Create a value of type TYPE whose contents come from VALADDR, if it
3438 is non-null, and whose memory address (in the inferior) is
3439 ADDRESS. The type of the created value may differ from the passed
3440 type TYPE. Make sure to retrieve values new type after this call.
3441 Note that TYPE is not passed through resolve_dynamic_type; this is
3442 a special API intended for use only by Ada. */
3443
3444 struct value *
3445 value_from_contents_and_address_unresolved (struct type *type,
3446 const gdb_byte *valaddr,
3447 CORE_ADDR address)
3448 {
3449 struct value *v;
3450
3451 if (valaddr == NULL)
3452 v = allocate_value_lazy (type);
3453 else
3454 v = value_from_contents (type, valaddr);
3455 VALUE_LVAL (v) = lval_memory;
3456 set_value_address (v, address);
3457 return v;
3458 }
3459
3460 /* Create a value of type TYPE whose contents come from VALADDR, if it
3461 is non-null, and whose memory address (in the inferior) is
3462 ADDRESS. The type of the created value may differ from the passed
3463 type TYPE. Make sure to retrieve values new type after this call. */
3464
3465 struct value *
3466 value_from_contents_and_address (struct type *type,
3467 const gdb_byte *valaddr,
3468 CORE_ADDR address)
3469 {
3470 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3471 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3472 struct value *v;
3473
3474 if (valaddr == NULL)
3475 v = allocate_value_lazy (resolved_type);
3476 else
3477 v = value_from_contents (resolved_type, valaddr);
3478 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3479 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3480 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3481 VALUE_LVAL (v) = lval_memory;
3482 set_value_address (v, address);
3483 return v;
3484 }
3485
3486 /* Create a value of type TYPE holding the contents CONTENTS.
3487 The new value is `not_lval'. */
3488
3489 struct value *
3490 value_from_contents (struct type *type, const gdb_byte *contents)
3491 {
3492 struct value *result;
3493
3494 result = allocate_value (type);
3495 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3496 return result;
3497 }
3498
3499 /* Extract a value from the history file. Input will be of the form
3500 $digits or $$digits. See block comment above 'write_dollar_variable'
3501 for details. */
3502
3503 struct value *
3504 value_from_history_ref (const char *h, const char **endp)
3505 {
3506 int index, len;
3507
3508 if (h[0] == '$')
3509 len = 1;
3510 else
3511 return NULL;
3512
3513 if (h[1] == '$')
3514 len = 2;
3515
3516 /* Find length of numeral string. */
3517 for (; isdigit (h[len]); len++)
3518 ;
3519
3520 /* Make sure numeral string is not part of an identifier. */
3521 if (h[len] == '_' || isalpha (h[len]))
3522 return NULL;
3523
3524 /* Now collect the index value. */
3525 if (h[1] == '$')
3526 {
3527 if (len == 2)
3528 {
3529 /* For some bizarre reason, "$$" is equivalent to "$$1",
3530 rather than to "$$0" as it ought to be! */
3531 index = -1;
3532 *endp += len;
3533 }
3534 else
3535 {
3536 char *local_end;
3537
3538 index = -strtol (&h[2], &local_end, 10);
3539 *endp = local_end;
3540 }
3541 }
3542 else
3543 {
3544 if (len == 1)
3545 {
3546 /* "$" is equivalent to "$0". */
3547 index = 0;
3548 *endp += len;
3549 }
3550 else
3551 {
3552 char *local_end;
3553
3554 index = strtol (&h[1], &local_end, 10);
3555 *endp = local_end;
3556 }
3557 }
3558
3559 return access_value_history (index);
3560 }
3561
3562 /* Get the component value (offset by OFFSET bytes) of a struct or
3563 union WHOLE. Component's type is TYPE. */
3564
3565 struct value *
3566 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3567 {
3568 struct value *v;
3569
3570 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3571 v = allocate_value_lazy (type);
3572 else
3573 {
3574 v = allocate_value (type);
3575 value_contents_copy (v, value_embedded_offset (v),
3576 whole, value_embedded_offset (whole) + offset,
3577 type_length_units (type));
3578 }
3579 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3580 set_value_component_location (v, whole);
3581
3582 return v;
3583 }
3584
3585 struct value *
3586 coerce_ref_if_computed (const struct value *arg)
3587 {
3588 const struct lval_funcs *funcs;
3589
3590 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3591 return NULL;
3592
3593 if (value_lval_const (arg) != lval_computed)
3594 return NULL;
3595
3596 funcs = value_computed_funcs (arg);
3597 if (funcs->coerce_ref == NULL)
3598 return NULL;
3599
3600 return funcs->coerce_ref (arg);
3601 }
3602
3603 /* Look at value.h for description. */
3604
struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      const struct value *original_value)
{
  /* Re-adjust type: the result of the indirection has the target type
     of the original (reference) type.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info: keep ENC_TYPE as the enclosing type, at the
     offset the original value pointed to within it.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3620
3621 struct value *
3622 coerce_ref (struct value *arg)
3623 {
3624 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3625 struct value *retval;
3626 struct type *enc_type;
3627
3628 retval = coerce_ref_if_computed (arg);
3629 if (retval)
3630 return retval;
3631
3632 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3633 return arg;
3634
3635 enc_type = check_typedef (value_enclosing_type (arg));
3636 enc_type = TYPE_TARGET_TYPE (enc_type);
3637
3638 retval = value_at_lazy (enc_type,
3639 unpack_pointer (value_type (arg),
3640 value_contents (arg)));
3641 enc_type = value_type (retval);
3642 return readjust_indirect_value_type (retval, enc_type,
3643 value_type_arg_tmp, arg);
3644 }
3645
3646 struct value *
3647 coerce_array (struct value *arg)
3648 {
3649 struct type *type;
3650
3651 arg = coerce_ref (arg);
3652 type = check_typedef (value_type (arg));
3653
3654 switch (TYPE_CODE (type))
3655 {
3656 case TYPE_CODE_ARRAY:
3657 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3658 arg = value_coerce_array (arg);
3659 break;
3660 case TYPE_CODE_FUNC:
3661 arg = value_coerce_function (arg);
3662 break;
3663 }
3664 return arg;
3665 }
3666 \f
3667
3668 /* Return the return value convention that will be used for the
3669 specified type. */
3670
3671 enum return_value_convention
3672 struct_return_convention (struct gdbarch *gdbarch,
3673 struct value *function, struct type *value_type)
3674 {
3675 enum type_code code = TYPE_CODE (value_type);
3676
3677 if (code == TYPE_CODE_ERROR)
3678 error (_("Function return type unknown."));
3679
3680 /* Probe the architecture for the return-value convention. */
3681 return gdbarch_return_value (gdbarch, function, value_type,
3682 NULL, NULL, NULL);
3683 }
3684
3685 /* Return true if the function returning the specified type is using
3686 the convention of returning structures in memory (passing in the
3687 address as a hidden first parameter). */
3688
3689 int
3690 using_struct_return (struct gdbarch *gdbarch,
3691 struct value *function, struct type *value_type)
3692 {
3693 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3694 /* A void return value is never in memory. See also corresponding
3695 code in "print_return_value". */
3696 return 0;
3697
3698 return (struct_return_convention (gdbarch, function, value_type)
3699 != RETURN_VALUE_REGISTER_CONVENTION);
3700 }
3701
3702 /* Set the initialized field in a value struct. */
3703
void
set_value_initialized (struct value *val, int status)
{
  /* Record STATUS in VAL's "initialized" flag.  */
  val->initialized = status;
}
3709
3710 /* Return the initialized field in a value struct. */
3711
int
value_initialized (const struct value *val)
{
  /* Return VAL's "initialized" flag as set by set_value_initialized.  */
  return val->initialized;
}
3717
3718 /* Load the actual content of a lazy value. Fetch the data from the
3719 user's process and clear the lazy flag to indicate that the data in
3720 the buffer is valid.
3721
3722 If the value is zero-length, we avoid calling read_memory, which
3723 would abort. We mark the value as fetched anyway -- all 0 bytes of
3724 it. */
3725
3726 void
3727 value_fetch_lazy (struct value *val)
3728 {
3729 gdb_assert (value_lazy (val));
3730 allocate_value_contents (val);
3731 /* A value is either lazy, or fully fetched. The
3732 availability/validity is only established as we try to fetch a
3733 value. */
3734 gdb_assert (val->optimized_out.empty ());
3735 gdb_assert (val->unavailable.empty ());
3736 if (value_bitsize (val))
3737 {
3738 /* To read a lazy bitfield, read the entire enclosing value. This
3739 prevents reading the same block of (possibly volatile) memory once
3740 per bitfield. It would be even better to read only the containing
3741 word, but we have no way to record that just specific bits of a
3742 value have been fetched. */
3743 struct type *type = check_typedef (value_type (val));
3744 struct value *parent = value_parent (val);
3745
3746 if (value_lazy (parent))
3747 value_fetch_lazy (parent);
3748
3749 unpack_value_bitfield (val,
3750 value_bitpos (val), value_bitsize (val),
3751 value_contents_for_printing (parent),
3752 value_offset (val), parent);
3753 }
3754 else if (VALUE_LVAL (val) == lval_memory)
3755 {
3756 CORE_ADDR addr = value_address (val);
3757 struct type *type = check_typedef (value_enclosing_type (val));
3758
3759 if (TYPE_LENGTH (type))
3760 read_value_memory (val, 0, value_stack (val),
3761 addr, value_contents_all_raw (val),
3762 type_length_units (type));
3763 }
3764 else if (VALUE_LVAL (val) == lval_register)
3765 {
3766 struct frame_info *next_frame;
3767 int regnum;
3768 struct type *type = check_typedef (value_type (val));
3769 struct value *new_val = val, *mark = value_mark ();
3770
3771 /* Offsets are not supported here; lazy register values must
3772 refer to the entire register. */
3773 gdb_assert (value_offset (val) == 0);
3774
3775 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3776 {
3777 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3778
3779 next_frame = frame_find_by_id (next_frame_id);
3780 regnum = VALUE_REGNUM (new_val);
3781
3782 gdb_assert (next_frame != NULL);
3783
3784 /* Convertible register routines are used for multi-register
3785 values and for interpretation in different types
3786 (e.g. float or int from a double register). Lazy
3787 register values should have the register's natural type,
3788 so they do not apply. */
3789 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3790 regnum, type));
3791
3792 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3793 Since a "->next" operation was performed when setting
3794 this field, we do not need to perform a "next" operation
3795 again when unwinding the register. That's why
3796 frame_unwind_register_value() is called here instead of
3797 get_frame_register_value(). */
3798 new_val = frame_unwind_register_value (next_frame, regnum);
3799
3800 /* If we get another lazy lval_register value, it means the
3801 register is found by reading it from NEXT_FRAME's next frame.
3802 frame_unwind_register_value should never return a value with
3803 the frame id pointing to NEXT_FRAME. If it does, it means we
3804 either have two consecutive frames with the same frame id
3805 in the frame chain, or some code is trying to unwind
3806 behind get_prev_frame's back (e.g., a frame unwind
3807 sniffer trying to unwind), bypassing its validations. In
3808 any case, it should always be an internal error to end up
3809 in this situation. */
3810 if (VALUE_LVAL (new_val) == lval_register
3811 && value_lazy (new_val)
3812 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3813 internal_error (__FILE__, __LINE__,
3814 _("infinite loop while fetching a register"));
3815 }
3816
3817 /* If it's still lazy (for instance, a saved register on the
3818 stack), fetch it. */
3819 if (value_lazy (new_val))
3820 value_fetch_lazy (new_val);
3821
3822 /* Copy the contents and the unavailability/optimized-out
3823 meta-data from NEW_VAL to VAL. */
3824 set_value_lazy (val, 0);
3825 value_contents_copy (val, value_embedded_offset (val),
3826 new_val, value_embedded_offset (new_val),
3827 type_length_units (type));
3828
3829 if (frame_debug)
3830 {
3831 struct gdbarch *gdbarch;
3832 struct frame_info *frame;
3833 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3834 so that the frame level will be shown correctly. */
3835 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3836 regnum = VALUE_REGNUM (val);
3837 gdbarch = get_frame_arch (frame);
3838
3839 fprintf_unfiltered (gdb_stdlog,
3840 "{ value_fetch_lazy "
3841 "(frame=%d,regnum=%d(%s),...) ",
3842 frame_relative_level (frame), regnum,
3843 user_reg_map_regnum_to_name (gdbarch, regnum));
3844
3845 fprintf_unfiltered (gdb_stdlog, "->");
3846 if (value_optimized_out (new_val))
3847 {
3848 fprintf_unfiltered (gdb_stdlog, " ");
3849 val_print_optimized_out (new_val, gdb_stdlog);
3850 }
3851 else
3852 {
3853 int i;
3854 const gdb_byte *buf = value_contents (new_val);
3855
3856 if (VALUE_LVAL (new_val) == lval_register)
3857 fprintf_unfiltered (gdb_stdlog, " register=%d",
3858 VALUE_REGNUM (new_val));
3859 else if (VALUE_LVAL (new_val) == lval_memory)
3860 fprintf_unfiltered (gdb_stdlog, " address=%s",
3861 paddress (gdbarch,
3862 value_address (new_val)));
3863 else
3864 fprintf_unfiltered (gdb_stdlog, " computed");
3865
3866 fprintf_unfiltered (gdb_stdlog, " bytes=");
3867 fprintf_unfiltered (gdb_stdlog, "[");
3868 for (i = 0; i < register_size (gdbarch, regnum); i++)
3869 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3870 fprintf_unfiltered (gdb_stdlog, "]");
3871 }
3872
3873 fprintf_unfiltered (gdb_stdlog, " }\n");
3874 }
3875
3876 /* Dispose of the intermediate values. This prevents
3877 watchpoints from trying to watch the saved frame pointer. */
3878 value_free_to_mark (mark);
3879 }
3880 else if (VALUE_LVAL (val) == lval_computed
3881 && value_computed_funcs (val)->read != NULL)
3882 value_computed_funcs (val)->read (val);
3883 else
3884 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3885
3886 set_value_lazy (val, 0);
3887 }
3888
3889 /* Implementation of the convenience function $_isvoid. */
3890
3891 static struct value *
3892 isvoid_internal_fn (struct gdbarch *gdbarch,
3893 const struct language_defn *language,
3894 void *cookie, int argc, struct value **argv)
3895 {
3896 int ret;
3897
3898 if (argc != 1)
3899 error (_("You must provide one argument for $_isvoid."));
3900
3901 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3902
3903 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3904 }
3905
3906 #if GDB_SELF_TEST
3907 namespace selftests
3908 {
3909
3910 /* Test the ranges_contain function. */
3911
3912 static void
3913 test_ranges_contain ()
3914 {
3915 std::vector<range> ranges;
3916 range r;
3917
3918 /* [10, 14] */
3919 r.offset = 10;
3920 r.length = 5;
3921 ranges.push_back (r);
3922
3923 /* [20, 24] */
3924 r.offset = 20;
3925 r.length = 5;
3926 ranges.push_back (r);
3927
3928 /* [2, 6] */
3929 SELF_CHECK (!ranges_contain (ranges, 2, 5));
3930 /* [9, 13] */
3931 SELF_CHECK (ranges_contain (ranges, 9, 5));
3932 /* [10, 11] */
3933 SELF_CHECK (ranges_contain (ranges, 10, 2));
3934 /* [10, 14] */
3935 SELF_CHECK (ranges_contain (ranges, 10, 5));
3936 /* [13, 18] */
3937 SELF_CHECK (ranges_contain (ranges, 13, 6));
3938 /* [14, 18] */
3939 SELF_CHECK (ranges_contain (ranges, 14, 5));
3940 /* [15, 18] */
3941 SELF_CHECK (!ranges_contain (ranges, 15, 4));
3942 /* [16, 19] */
3943 SELF_CHECK (!ranges_contain (ranges, 16, 4));
3944 /* [16, 21] */
3945 SELF_CHECK (ranges_contain (ranges, 16, 6));
3946 /* [21, 21] */
3947 SELF_CHECK (ranges_contain (ranges, 21, 1));
3948 /* [21, 25] */
3949 SELF_CHECK (ranges_contain (ranges, 21, 5));
3950 /* [26, 28] */
3951 SELF_CHECK (!ranges_contain (ranges, 26, 3));
3952 }
3953
3954 /* Check that RANGES contains the same ranges as EXPECTED. */
3955
static bool
check_ranges_vector (gdb::array_view<const range> ranges,
		     gdb::array_view<const range> expected)
{
  /* array_view's operator== performs the size and element-wise
     comparison for us.  */
  return ranges == expected;
}
3962
3963 /* Test the insert_into_bit_range_vector function. */
3964
static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 10, 5);
    static const range expected[] = {
      {10, 5}
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] -- an insert fully inside an existing range is a no-op.  */
  {
    insert_into_bit_range_vector (&ranges, 11, 4);
    static const range expected = {10, 5};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [20, 24] */
  {
    insert_into_bit_range_vector (&ranges, 20, 5);
    static const range expected[] = {
      {10, 5},
      {20, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [17, 24] -- an overlapping insert merges with its
     right neighbor.  */
  {
    insert_into_bit_range_vector (&ranges, 17, 5);
    static const range expected[] = {
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 8] [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 2, 7);
    static const range expected[] = {
      {2, 7},
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] -- filling the gap joins the two leading
     ranges.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] -- repeating the same insert is idempotent.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 33] -- a large insert coalesces everything into one range.  */
  {
    insert_into_bit_range_vector (&ranges, 4, 30);
    static const range expected = {2, 32};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }
}
4044
4045 } /* namespace selftests */
4046 #endif /* GDB_SELF_TEST */
4047
4048 void
4049 _initialize_values (void)
4050 {
4051 add_cmd ("convenience", no_class, show_convenience, _("\
4052 Debugger convenience (\"$foo\") variables and functions.\n\
4053 Convenience variables are created when you assign them values;\n\
4054 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4055 \n\
4056 A few convenience variables are given values automatically:\n\
4057 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4058 \"$__\" holds the contents of the last address examined with \"x\"."
4059 #ifdef HAVE_PYTHON
4060 "\n\n\
4061 Convenience functions are defined via the Python API."
4062 #endif
4063 ), &showlist);
4064 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4065
4066 add_cmd ("values", no_set_class, show_values, _("\
4067 Elements of value history around item number IDX (or last ten)."),
4068 &showlist);
4069
4070 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4071 Initialize a convenience variable if necessary.\n\
4072 init-if-undefined VARIABLE = EXPRESSION\n\
4073 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4074 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4075 VARIABLE is already initialized."));
4076
4077 add_prefix_cmd ("function", no_class, function_command, _("\
4078 Placeholder command for showing help on convenience functions."),
4079 &functionlist, "function ", 0, &cmdlist);
4080
4081 add_internal_function ("_isvoid", _("\
4082 Check whether an expression is void.\n\
4083 Usage: $_isvoid (expression)\n\
4084 Return 1 if the expression is void, zero otherwise."),
4085 isvoid_internal_fn, NULL);
4086
4087 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4088 class_support, &max_value_size, _("\
4089 Set maximum sized value gdb will load from the inferior."), _("\
4090 Show maximum sized value gdb will load from the inferior."), _("\
4091 Use this to control the maximum size, in bytes, of a value that gdb\n\
4092 will load from the inferior. Setting this value to 'unlimited'\n\
4093 disables checking.\n\
4094 Setting this does not invalidate already allocated values, it only\n\
4095 prevents future values, larger than this size, from being allocated."),
4096 set_max_value_size,
4097 show_max_value_size,
4098 &setlist, &showlist);
4099 #if GDB_SELF_TEST
4100 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4101 selftests::register_test ("insert_into_bit_range_vector",
4102 selftests::test_insert_into_bit_range_vector);
4103 #endif
4104 }
This page took 0.164262 seconds and 4 git commands to generate.