Adapt `info probes' to support printing probes of different types.
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
/* Definition of a user function (a "convenience function" callable
   from expressions as $name (...)).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler invoked when the function is called from an
     expression.  */
  internal_function_fn handler;

  /* User data passed through to HANDLER on each call.  */
  void *cookie;
};
62
/* Defines an [OFFSET, OFFSET + LENGTH) range.  Used to track
   unavailable and optimized-out chunks of a value's contents; the
   unit (bits vs bytes) is determined by the user of the range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (int offset1, int len1,
83 int offset2, int len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
91
/* Returns true if the first argument is strictly less than the
   second, useful for VEC_lower_bound.  We keep ranges sorted by
   offset and coalesce overlapping and contiguous ranges, so this just
   compares the starting offset.  */

static int
range_lessthan (const range_s *r1, const range_s *r2)
{
  return r1->offset < r2->offset;
}
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109 range_s what;
110 int i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range afterall), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we need to check if the I range overlaps the I range itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
166
/* Command list for internal ("convenience") functions -- NOTE(review):
   its population/consumers are outside this view; presumably set up in
   _initialize_values.  Confirm before relying on this description.  */
static struct cmd_list_element *functionlist;
168
/* The core GDB value object.  Note that the fields in this structure
   are arranged to save a bit of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released (taken off the all_values
     chain).  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  */
  short regnum;

  /* Location of value (if lval).  Which union member is active is
     determined by LVAL above.  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in bytes.
     If lval == lval_memory, this is an offset to the address.  If
     lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the
     member embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in bytes.  The
     value_contents() macro takes `embedded_offset' into account, so
     most GDB code continues to see the `type' portion of the value,
     just as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in bytes from the full object to the pointed-to object
     -- that is, the value `embedded_offset' would have if we followed
     the pointer and fetched the complete object.  (I don't really see
     the point.  Why not just determine the run-time type when you
     indirect, and avoid the special case?  The contents don't matter
     until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
342
/* Return non-zero iff the LENGTH bits of VALUE's contents starting at
   bit OFFSET are all available.  VALUE must already have been
   fetched (non-lazy), since availability is only known after a
   read.  */

int
value_bits_available (const struct value *value, int offset, int length)
{
  gdb_assert (!value->lazy);

  return !ranges_contain (value->unavailable, offset, length);
}
350
351 int
352 value_bytes_available (const struct value *value, int offset, int length)
353 {
354 return value_bits_available (value,
355 offset * TARGET_CHAR_BIT,
356 length * TARGET_CHAR_BIT);
357 }
358
/* Return non-zero iff any bit in [BIT_OFFSET, BIT_OFFSET + BIT_LENGTH)
   of VALUE's contents is optimized out.  VALUE must already have been
   fetched (non-lazy).  */

int
value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
{
  gdb_assert (!value->lazy);

  return ranges_contain (value->optimized_out, bit_offset, bit_length);
}
366
367 int
368 value_entirely_available (struct value *value)
369 {
370 /* We can only tell whether the whole value is available when we try
371 to read it. */
372 if (value->lazy)
373 value_fetch_lazy (value);
374
375 if (VEC_empty (range_s, value->unavailable))
376 return 1;
377 return 0;
378 }
379
380 /* Returns true if VALUE is entirely covered by RANGES. If the value
381 is lazy, it'll be read now. Note that RANGE is a pointer to
382 pointer because reading the value might change *RANGE. */
383
384 static int
385 value_entirely_covered_by_range_vector (struct value *value,
386 VEC(range_s) **ranges)
387 {
388 /* We can only tell whether the whole value is optimized out /
389 unavailable when we try to read it. */
390 if (value->lazy)
391 value_fetch_lazy (value);
392
393 if (VEC_length (range_s, *ranges) == 1)
394 {
395 struct range *t = VEC_index (range_s, *ranges, 0);
396
397 if (t->offset == 0
398 && t->length == (TARGET_CHAR_BIT
399 * TYPE_LENGTH (value_enclosing_type (value))))
400 return 1;
401 }
402
403 return 0;
404 }
405
/* Return non-zero iff all of VALUE's contents are unavailable.
   Fetches the value if it is still lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->unavailable);
}
411
/* Return non-zero iff all of VALUE's contents are optimized out.
   Fetches the value if it is still lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->optimized_out);
}
417
/* Insert into the vector pointed to by VECTORP the bit range starting of
   OFFSET bits, and extending for the next LENGTH bits.  The vector is
   kept sorted by offset, and overlapping or contiguous ranges are
   merged so that at most one range covers any given bit.  */

static void
insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range afterall), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	   0         1       2            N

       I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|
	   0          1       2            N

       I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	  |--|         |---|  |------| ... |--|
	    0            1       2            N

       I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	  R
	  |-...-|
	         |--|       |---|  |------| ... |--|
	           0          1       2            N

       I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	  R
	  |------------------------|
	         |--|       |---|  |------| ... |--|
	           0          1       2            N

       I=0

       or:

	       R
	       |-------|
	  |--|         |---|  |------| ... |--|
	    0            1       2            N

       I=1

  */

  i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, *vectorp, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	{
	  /* #1: grow the previous range to the union of both, and
	     remember it (I--) as the range we just touched.  */
	  ULONGEST l = min (bef->offset, offset);
	  ULONGEST h = max (bef->offset + bef->length, offset + length);

	  bef->offset = l;
	  bef->length = h - l;
	  i--;
	}
      else if (offset == bef->offset + bef->length)
	{
	  /* #2: extend the previous range in place.  */
	  bef->length += length;
	  i--;
	}
      else
	{
	  /* #3: stand-alone insert at I.  */
	  VEC_safe_insert (range_s, *vectorp, i, &newr);
	}
    }
  else
    {
      /* #4: new lowest-offset range, insert at the front.  */
      VEC_safe_insert (range_s, *vectorp, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, *vectorp))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, *vectorp, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, *vectorp, i, r); i++)
	if (r->offset <= t->offset + t->length)
	  {
	    /* R overlaps or is contiguous with T: fold it into T and
	       count it for removal below.  */
	    ULONGEST l, h;

	    l = min (t->offset, r->offset);
	    h = max (t->offset + t->length, r->offset + r->length);

	    t->offset = l;
	    t->length = h - l;

	    removed++;
	  }
	else
	  {
	    /* If we couldn't merge this one, we won't be able to
	       merge following ones either, since the ranges are
	       always sorted by OFFSET.  */
	    break;
	  }

      /* Delete the ranges that were folded into T, all at once.  */
      if (removed != 0)
	VEC_block_remove (range_s, *vectorp, next, removed);
    }
}
585
/* Mark the LENGTH bits of VALUE's contents starting at bit OFFSET as
   unavailable.  */

void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
591
592 void
593 mark_value_bytes_unavailable (struct value *value, int offset, int length)
594 {
595 mark_value_bits_unavailable (value,
596 offset * TARGET_CHAR_BIT,
597 length * TARGET_CHAR_BIT);
598 }
599
600 /* Find the first range in RANGES that overlaps the range defined by
601 OFFSET and LENGTH, starting at element POS in the RANGES vector,
602 Returns the index into RANGES where such overlapping range was
603 found, or -1 if none was found. */
604
605 static int
606 find_first_range_overlap (VEC(range_s) *ranges, int pos,
607 int offset, int length)
608 {
609 range_s *r;
610 int i;
611
612 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
613 if (ranges_overlap (r->offset, r->length, offset, length))
614 return i;
615
616 return -1;
617 }
618
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)

   The comparison proceeds in three steps: a partial byte at the
   start (if the offsets are not byte aligned), a partial byte at the
   end (if the remaining length is not a whole number of bytes), and
   a plain memcmp for the whole bytes in between.  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The compared region ends before the byte boundary; drop
	     the low-order bits beyond LENGTH_BITS from the mask.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
719
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here, avoiding re-scanning from the
     front on every call.  */
  int idx;
};
733
734 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
735 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
736 ranges starting at OFFSET2 bits. Return true if the ranges match
737 and fill in *L and *H with the overlapping window relative to
738 (both) OFFSET1 or OFFSET2. */
739
740 static int
741 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
742 struct ranges_and_idx *rp2,
743 int offset1, int offset2,
744 int length, ULONGEST *l, ULONGEST *h)
745 {
746 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
747 offset1, length);
748 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
749 offset2, length);
750
751 if (rp1->idx == -1 && rp2->idx == -1)
752 {
753 *l = length;
754 *h = length;
755 return 1;
756 }
757 else if (rp1->idx == -1 || rp2->idx == -1)
758 return 0;
759 else
760 {
761 range_s *r1, *r2;
762 ULONGEST l1, h1;
763 ULONGEST l2, h2;
764
765 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
766 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
767
768 /* Get the unavailable windows intersected by the incoming
769 ranges. The first and last ranges that overlap the argument
770 range may be wider than said incoming arguments ranges. */
771 l1 = max (offset1, r1->offset);
772 h1 = min (offset1 + length, r1->offset + r1->length);
773
774 l2 = max (offset2, r2->offset);
775 h2 = min (offset2 + length, offset2 + r2->length);
776
777 /* Make them relative to the respective start offsets, so we can
778 compare them for equality. */
779 l1 -= offset1;
780 h1 -= offset1;
781
782 l2 -= offset2;
783 h2 -= offset2;
784
785 /* Different ranges, no match. */
786 if (l1 != l2 || h1 != h2)
787 return 0;
788
789 *h = h1;
790 *l = l1;
791 return 1;
792 }
793 }
794
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match, i.e., if the raw bits are
   equal AND the unavailable/optimized-out windows line up exactly.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the window in chunks: on each iteration, L is the number of
     leading bits that are valid on both sides (compared with memcmp),
     and H is how far to advance before the next comparable chunk.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
863
864 int
865 value_contents_eq (const struct value *val1, int offset1,
866 const struct value *val2, int offset2,
867 int length)
868 {
869 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
870 val2, offset2 * TARGET_CHAR_BIT,
871 length * TARGET_CHAR_BIT);
872 }
873
874 /* Prototypes for local functions. */
875
876 static void show_values (char *, int);
877
878 static void show_convenience (char *, int);
879
880
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records VALUE_HISTORY_CHUNK consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Next (older) chunk in the chain.  */
  struct value_history_chunk *next;
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */

/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static struct value *all_values;
907
908 /* Allocate a lazy value for type TYPE. Its actual content is
909 "lazily" allocated too: the content field of the return value is
910 NULL; it will be allocated when it is fetched from the target. */
911
912 struct value *
913 allocate_value_lazy (struct type *type)
914 {
915 struct value *val;
916
917 /* Call check_typedef on our type to make sure that, if TYPE
918 is a TYPE_CODE_TYPEDEF, its length is set to the length
919 of the target type instead of zero. However, we do not
920 replace the typedef type by the target type, because we want
921 to keep the typedef in order to be able to set the VAL's type
922 description correctly. */
923 check_typedef (type);
924
925 val = (struct value *) xzalloc (sizeof (struct value));
926 val->contents = NULL;
927 val->next = all_values;
928 all_values = val;
929 val->type = type;
930 val->enclosing_type = type;
931 VALUE_LVAL (val) = not_lval;
932 val->location.address = 0;
933 VALUE_FRAME_ID (val) = null_frame_id;
934 val->offset = 0;
935 val->bitpos = 0;
936 val->bitsize = 0;
937 VALUE_REGNUM (val) = -1;
938 val->lazy = 1;
939 val->embedded_offset = 0;
940 val->pointed_to_offset = 0;
941 val->modifiable = 1;
942 val->initialized = 1; /* Default to initialized. */
943
944 /* Values start out on the all_values chain. */
945 val->reference_count = 1;
946
947 return val;
948 }
949
950 /* Allocate the contents of VAL if it has not been allocated yet. */
951
952 static void
953 allocate_value_contents (struct value *val)
954 {
955 if (!val->contents)
956 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
957 }
958
959 /* Allocate a value and its contents for type TYPE. */
960
961 struct value *
962 allocate_value (struct type *type)
963 {
964 struct value *val = allocate_value_lazy (type);
965
966 allocate_value_contents (val);
967 val->lazy = 0;
968 return val;
969 }
970
971 /* Allocate a value that has the correct length
972 for COUNT repetitions of type TYPE. */
973
974 struct value *
975 allocate_repeat_value (struct type *type, int count)
976 {
977 int low_bound = current_language->string_lower_bound; /* ??? */
978 /* FIXME-type-allocation: need a way to free this type when we are
979 done with it. */
980 struct type *array_type
981 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
982
983 return allocate_value (array_type);
984 }
985
986 struct value *
987 allocate_computed_value (struct type *type,
988 const struct lval_funcs *funcs,
989 void *closure)
990 {
991 struct value *v = allocate_value_lazy (type);
992
993 VALUE_LVAL (v) = lval_computed;
994 v->location.computed.funcs = funcs;
995 v->location.computed.closure = closure;
996
997 return v;
998 }
999
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *val = allocate_value_lazy (type);

  /* Mark every byte optimized out, then clear the lazy flag so no
     fetch of the (nonexistent) contents is ever attempted.  */
  mark_value_bytes_optimized_out (val, 0, TYPE_LENGTH (type));
  set_value_lazy (val, 0);

  return val;
}
1011
/* Accessor methods.  */

/* Return the value following VALUE on the all_values chain.  */

struct value *
value_next (struct value *value)
{
  return value->next;
}
1019
/* Return VALUE's (compile-time) type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}

/* Overwrite VALUE's type with TYPE.  Deprecated: callers should
   generally construct a new value rather than mutate the type.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}
1030
/* Return VALUE's byte offset within its containing lval (see the
   `offset' field of struct value).  */

int
value_offset (const struct value *value)
{
  return value->offset;
}

/* Set VALUE's byte offset within its containing lval to OFFSET.  */
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}
1041
/* Return the bit position of VALUE, for bitfield values (see the
   `bitpos' field of struct value for endianness semantics).  */

int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}

/* Set VALUE's bitfield start position to BIT.  */
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}
1052
/* Return the bit size of VALUE; only meaningful for bitfield
   values.  */

int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}

/* Set VALUE's bitfield width to BIT bits.  */
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}
1063
/* Return the containing value of bitfield VALUE (see the `parent'
   field of struct value), or NULL.  */

struct value *
value_parent (struct value *value)
{
  return value->parent;
}

/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Take the new reference before dropping the old one, so that
     the ordering is safe even when PARENT == OLD.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1082
/* Return a writable pointer to VALUE's contents for its `type'
   portion (i.e., skipping `embedded_offset' bytes of the enclosing
   object), allocating the contents buffer if needed.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents + value->embedded_offset;
}

/* Return a writable pointer to the contents of VALUE's full
   enclosing object, allocating the contents buffer if needed.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}
1096
/* Return the enclosing type of VALUE (the full object's type, which
   may be larger than value_type when e.g. RTTI found a derived
   class).  */

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1102
1103 /* Look at value.h for description. */
1104
struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  result = value_type (value);
  /* Only attempt dynamic-type resolution when the user has "set print
     object on"; otherwise the declared type is returned unchanged.  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* Non-pointer case: fall back to the enclosing type, which
	     reflects the full runtime object when known.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1146
/* Throw the standard error for accessing an optimized-out value.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}
1152
/* Error out if any part of VALUE has been optimized out.  Registers
   get a more specific message than other locations.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}
1164
/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1171
/* Return VALUE's full contents for printing purposes, fetching them
   first if VALUE is lazy.  Unlike value_contents_all, this does not
   error out on optimized-out or unavailable ranges.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}
1179
/* Const variant of value_contents_for_printing; VALUE must already
   have been fetched (non-lazy), since we cannot fetch through a const
   pointer.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}
1186
/* Return VALUE's full contents, erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1195
1196 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1197 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1198
1199 static void
1200 ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
1201 VEC (range_s) *src_range, int src_bit_offset,
1202 int bit_length)
1203 {
1204 range_s *r;
1205 int i;
1206
1207 for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
1208 {
1209 ULONGEST h, l;
1210
1211 l = max (r->offset, src_bit_offset);
1212 h = min (r->offset + r->length, src_bit_offset + bit_length);
1213
1214 if (l < h)
1215 insert_into_bit_range_vector (dst_range,
1216 dst_bit_offset + (l - src_bit_offset),
1217 h - l);
1218 }
1219 }
1220
1221 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1222 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1223
/* Copy both the unavailable and optimized-out range metadata of SRC
   that overlap [SRC_BIT_OFFSET, SRC_BIT_OFFSET+BIT_LENGTH) into DST,
   adjusted to DST_BIT_OFFSET.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1236
1237 /* Copy LENGTH bytes of SRC value's (all) contents
1238 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1239 contents, starting at DST_OFFSET. If unavailable contents are
1240 being copied from SRC, the corresponding DST contents are marked
1241 unavailable accordingly. Neither DST nor SRC may be lazy
1242 values.
1243
1244 It is assumed the contents of DST in the [DST_OFFSET,
1245 DST_OFFSET+LENGTH) range are wholly available. */
1246
1247 void
1248 value_contents_copy_raw (struct value *dst, int dst_offset,
1249 struct value *src, int src_offset, int length)
1250 {
1251 range_s *r;
1252 int i;
1253 int src_bit_offset, dst_bit_offset, bit_length;
1254
1255 /* A lazy DST would make that this copy operation useless, since as
1256 soon as DST's contents were un-lazied (by a later value_contents
1257 call, say), the contents would be overwritten. A lazy SRC would
1258 mean we'd be copying garbage. */
1259 gdb_assert (!dst->lazy && !src->lazy);
1260
1261 /* The overwritten DST range gets unavailability ORed in, not
1262 replaced. Make sure to remember to implement replacing if it
1263 turns out actually necessary. */
1264 gdb_assert (value_bytes_available (dst, dst_offset, length));
1265 gdb_assert (!value_bits_any_optimized_out (dst,
1266 TARGET_CHAR_BIT * dst_offset,
1267 TARGET_CHAR_BIT * length));
1268
1269 /* Copy the data. */
1270 memcpy (value_contents_all_raw (dst) + dst_offset,
1271 value_contents_all_raw (src) + src_offset,
1272 length);
1273
1274 /* Copy the meta-data, adjusted. */
1275 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1276 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1277 bit_length = length * TARGET_CHAR_BIT;
1278
1279 value_ranges_copy_adjusted (dst, dst_bit_offset,
1280 src, src_bit_offset,
1281 bit_length);
1282 }
1283
1284 /* Copy LENGTH bytes of SRC value's (all) contents
1285 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1286 (all) contents, starting at DST_OFFSET. If unavailable contents
1287 are being copied from SRC, the corresponding DST contents are
1288 marked unavailable accordingly. DST must not be lazy. If SRC is
1289 lazy, it will be fetched now.
1290
1291 It is assumed the contents of DST in the [DST_OFFSET,
1292 DST_OFFSET+LENGTH) range are wholly available. */
1293
void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first; value_contents_copy_raw requires both values
     to be non-lazy.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1303
/* Return non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (struct value *value)
{
  return value->lazy;
}
1309
/* Set VALUE's lazy flag to VAL.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}
1315
/* Return non-zero if VALUE's location is known to be on the stack.  */

int
value_stack (struct value *value)
{
  return value->stack;
}
1321
/* Set VALUE's stack flag to VAL.  */

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1327
/* Return VALUE's contents at the embedded offset, fetching them if
   necessary and erroring out on optimized-out or unavailable
   ranges.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1336
/* Return a writeable pointer to VALUE's contents at the embedded
   offset, fetching them first if VALUE is lazy.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1344
int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    value_fetch_lazy (value);

  return !VEC_empty (range_s, value->optimized_out);
}
1355
1356 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1357 the following LENGTH bytes. */
1358
void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Delegate to the bit-granular variant.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
1366
1367 /* See value.h. */
1368
void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  /* Record [OFFSET, OFFSET+LENGTH) bits as optimized out.  */
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1374
/* Return non-zero if bits [OFFSET, OFFSET+LENGTH) of VALUE form a
   synthetic pointer (e.g. a DWARF implicit pointer).  Only computed
   lvalues whose funcs provide a check can report this.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      int offset, int length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1386
/* Return the offset of the declared type within the enclosing type's
   contents buffer.  */

int
value_embedded_offset (struct value *value)
{
  return value->embedded_offset;
}
1392
/* Set the embedded offset of VALUE to VAL.  */

void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}
1398
/* Return the pointed-to offset recorded on VALUE.  */

int
value_pointed_to_offset (struct value *value)
{
  return value->pointed_to_offset;
}
1404
/* Set the pointed-to offset of VALUE to VAL.  */

void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}
1410
/* Return the lval_funcs of computed lvalue V.  V must be a computed
   lvalue.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}
1418
1419 void *
1420 value_computed_closure (const struct value *v)
1421 {
1422 gdb_assert (v->lval == lval_computed);
1423
1424 return v->location.computed.closure;
1425 }
1426
/* Return a mutable pointer to VALUE's lval kind.  Deprecated: used by
   the VALUE_LVAL macro; prefer the const accessor where possible.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}
1432
/* Return VALUE's lval kind without exposing a mutable pointer.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1438
/* Return the effective address of VALUE: zero for kinds that have no
   address; otherwise the parent's address plus our offset when VALUE
   is a component, or the recorded location address plus offset.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  else
    return value->location.address + value->offset;
}
1451
/* Return VALUE's raw location address, without adding the offset or
   consulting the parent.  Zero for address-less kinds.  */

CORE_ADDR
value_raw_address (struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  return value->location.address;
}
1461
/* Set VALUE's location address to ADDR.  VALUE must be a kind that
   actually has an address.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1470
/* Return a mutable pointer to VALUE's internalvar location slot.
   Deprecated: backs the VALUE_INTERNALVAR macro.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}
1476
/* Return a mutable pointer to VALUE's frame id.  Deprecated: backs
   the VALUE_FRAME_ID macro.  */

struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}
1482
/* Return a mutable pointer to VALUE's register number.  Deprecated:
   backs the VALUE_REGNUM macro.  */

short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}
1488
/* Return non-zero if VALUE may be modified (e.g. history values are
   frozen and return zero).  */

int
deprecated_value_modifiable (struct value *value)
{
  return value->modifiable;
}
1494 \f
1495 /* Return a mark in the value chain. All values allocated after the
1496 mark is obtained (except for those released) are subject to being freed
1497 if a subsequent value_free_to_mark is passed the mark. */
struct value *
value_mark (void)
{
  /* The head of the all_values chain serves as the mark.  */
  return all_values;
}
1503
1504 /* Take a reference to VAL. VAL will not be deallocated until all
1505 references are released. */
1506
void
value_incref (struct value *val)
{
  val->reference_count++;
}
1512
1513 /* Release a reference to VAL, which was acquired with value_incref.
1514 This function is also called to deallocate values from the value
1515 chain. */
1516
1517 void
1518 value_free (struct value *val)
1519 {
1520 if (val)
1521 {
1522 gdb_assert (val->reference_count > 0);
1523 val->reference_count--;
1524 if (val->reference_count > 0)
1525 return;
1526
1527 /* If there's an associated parent value, drop our reference to
1528 it. */
1529 if (val->parent != NULL)
1530 value_free (val->parent);
1531
1532 if (VALUE_LVAL (val) == lval_computed)
1533 {
1534 const struct lval_funcs *funcs = val->location.computed.funcs;
1535
1536 if (funcs->free_closure)
1537 funcs->free_closure (val);
1538 }
1539 else if (VALUE_LVAL (val) == lval_xcallable)
1540 free_xmethod_worker (val->location.xm_worker);
1541
1542 xfree (val->contents);
1543 VEC_free (range_s, val->unavailable);
1544 }
1545 xfree (val);
1546 }
1547
1548 /* Free all values allocated since MARK was obtained by value_mark
1549 (except for those released). */
void
value_free_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Walk from the head of the chain down to (but not including) MARK,
     releasing each value's chain reference.  */
  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }
  all_values = val;
}
1564
1565 /* Free all the values that have been allocated (except for those released).
1566 Call after each command, successful or not.
1567 In practice this is called before each command, which is sufficient. */
1568
1569 void
1570 free_all_values (void)
1571 {
1572 struct value *val;
1573 struct value *next;
1574
1575 for (val = all_values; val; val = next)
1576 {
1577 next = val->next;
1578 val->released = 1;
1579 value_free (val);
1580 }
1581
1582 all_values = 0;
1583 }
1584
1585 /* Frees all the elements in a chain of values. */
1586
1587 void
1588 free_value_chain (struct value *v)
1589 {
1590 struct value *next;
1591
1592 for (; v; v = next)
1593 {
1594 next = value_next (v);
1595 value_free (v);
1596 }
1597 }
1598
1599 /* Remove VAL from the chain all_values
1600 so it will not be freed automatically. */
1601
1602 void
1603 release_value (struct value *val)
1604 {
1605 struct value *v;
1606
1607 if (all_values == val)
1608 {
1609 all_values = val->next;
1610 val->next = NULL;
1611 val->released = 1;
1612 return;
1613 }
1614
1615 for (v = all_values; v; v = v->next)
1616 {
1617 if (v->next == val)
1618 {
1619 v->next = val->next;
1620 val->next = NULL;
1621 val->released = 1;
1622 break;
1623 }
1624 }
1625 }
1626
1627 /* If the value is not already released, release it.
1628 If the value is already released, increment its reference count.
1629 That is, this function ensures that the value is released from the
1630 value chain and that the caller owns a reference to it. */
1631
void
release_value_or_incref (struct value *val)
{
  /* Either way the caller ends up owning one reference that is not
     tied to the all_values chain.  */
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1640
1641 /* Release all values up to mark */
/* Release all values up to mark  */
struct value *
value_release_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Mark every value from the head down to MARK as released, detach
     that prefix from all_values, and return its head so the caller
     owns the chain.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  /* MARK was not found (or was NULL): release the whole chain.  */
  all_values = 0;
  return val;
}
1661
1662 /* Return a copy of the value ARG.
1663 It contains the same contents, for same memory address,
1664 but it's a different block of storage. */
1665
struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Preserve laziness: a lazy source yields a lazy copy.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  /* Only non-lazy values have contents to duplicate.  */
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  set_value_parent (val, arg->parent);
  /* Computed lvalues may need a deep copy of their closure.  */
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1706
1707 /* Return a version of ARG that is non-lvalue. */
1708
struct value *
value_non_lval (struct value *arg)
{
  /* Already a non-lvalue: return ARG itself, no copy needed.  */
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1726
1727 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1728
void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  /* Materialize V at ADDR in the inferior and retarget V there.  */
  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1738
/* Make COMPONENT's location the same as WHOLE's, so that accesses to
   the component go through the whole's location.  Internal variables
   get the dedicated component lval kind; computed lvalues may need
   their closure copied.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }
}
1759
1760 \f
1761 /* Access to the value history. */
1762
1763 /* Record a new value in the value history.
1764 Returns the absolute history index of the entry. */
1765
int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  /* Starting a new chunk: allocate and link it onto the chunk list.  */
  if (i == 0)
    {
      struct value_history_chunk *new
	= (struct value_history_chunk *)

	xmalloc (sizeof (struct value_history_chunk));
      memset (new->values, 0, sizeof new->values);
      new->next = value_history_chain;
      value_history_chain = new;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1809
1810 /* Return a copy of the value in the history with sequence number NUM. */
1811
struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* NUM <= 0 is relative to the most recent entry ($$N syntax).  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* Chunks are linked newest-first; walk back to the one holding
     entry ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  /* Return a copy so callers cannot mutate the stored entry.  */
  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1846
/* Implement the "show values" command: print ten history values,
   either the last ten, ten around a given number, or the next ten
   after a previous "show values +".  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1893 \f
1894 /* Internal variables. These are variables within the debugger
1895 that hold values assigned by debugger commands.
1896 The user refers to them with a '$' prefix
1897 that does not appear in the variable names stored internally. */
1898
struct internalvar
{
  /* Next variable on the singly-linked list of all internal
     variables.  */
  struct internalvar *next;

  /* The variable's name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind
  {
    /* The internal variable is empty.  */
    INTERNALVAR_VOID,

    /* The value of the internal variable is provided directly as
       a GDB value object.  */
    INTERNALVAR_VALUE,

    /* A fresh value is computed via a call-back routine on every
       access to the internal variable.  */
    INTERNALVAR_MAKE_VALUE,

    /* The internal variable holds a GDB internal convenience function.  */
    INTERNALVAR_FUNCTION,

    /* The variable holds an integer value.  */
    INTERNALVAR_INTEGER,

    /* The variable holds a GDB-provided string.  */
    INTERNALVAR_STRING,

  } kind;

  union internalvar_data
  {
    /* A value object used with INTERNALVAR_VALUE.  */
    struct value *value;

    /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
    struct
    {
      /* The functions to call.  */
      const struct internalvar_funcs *functions;

      /* The function's user-data.  */
      void *data;
    } make_value;

    /* The internal function used with INTERNALVAR_FUNCTION.  */
    struct
    {
      struct internal_function *function;
      /* True if this is the canonical name for the function.  */
      int canonical;
    } fn;

    /* An integer value used with INTERNALVAR_INTEGER.  */
    struct
    {
      /* If type is non-NULL, it will be used as the type to generate
	 a value for this internal variable.  If type is NULL, a default
	 integer type for the architecture is used.  */
      struct type *type;
      LONGEST val;
    } integer;

    /* A string value used with INTERNALVAR_STRING.  */
    char *string;
  } u;
};
1969
1970 static struct internalvar *internalvars;
1971
1972 /* If the variable does not already exist create it and give it the
1973 value given. If no value is given then the default is zero. */
1974 static void
1975 init_if_undefined_command (char* args, int from_tty)
1976 {
1977 struct internalvar* intvar;
1978
1979 /* Parse the expression - this is taken from set_command(). */
1980 struct expression *expr = parse_expression (args);
1981 register struct cleanup *old_chain =
1982 make_cleanup (free_current_contents, &expr);
1983
1984 /* Validate the expression.
1985 Was the expression an assignment?
1986 Or even an expression at all? */
1987 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1988 error (_("Init-if-undefined requires an assignment expression."));
1989
1990 /* Extract the variable from the parsed expression.
1991 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1992 if (expr->elts[1].opcode != OP_INTERNALVAR)
1993 error (_("The first parameter to init-if-undefined "
1994 "should be a GDB variable."));
1995 intvar = expr->elts[2].internalvar;
1996
1997 /* Only evaluate the expression if the lvalue is void.
1998 This may still fail if the expresssion is invalid. */
1999 if (intvar->kind == INTERNALVAR_VOID)
2000 evaluate_expression (expr);
2001
2002 do_cleanups (old_chain);
2003 }
2004
2005
2006 /* Look up an internal variable with name NAME. NAME should not
2007 normally include a dollar sign.
2008
2009 If the specified internal variable does not exist,
2010 the return value is NULL. */
2011
2012 struct internalvar *
2013 lookup_only_internalvar (const char *name)
2014 {
2015 struct internalvar *var;
2016
2017 for (var = internalvars; var; var = var->next)
2018 if (strcmp (var->name, name) == 0)
2019 return var;
2020
2021 return NULL;
2022 }
2023
2024 /* Complete NAME by comparing it to the names of internal variables.
2025 Returns a vector of newly allocated strings, or NULL if no matches
2026 were found. */
2027
2028 VEC (char_ptr) *
2029 complete_internalvar (const char *name)
2030 {
2031 VEC (char_ptr) *result = NULL;
2032 struct internalvar *var;
2033 int len;
2034
2035 len = strlen (name);
2036
2037 for (var = internalvars; var; var = var->next)
2038 if (strncmp (var->name, name, len) == 0)
2039 {
2040 char *r = xstrdup (var->name);
2041
2042 VEC_safe_push (char_ptr, result, r);
2043 }
2044
2045 return result;
2046 }
2047
2048 /* Create an internal variable with name NAME and with a void value.
2049 NAME should not normally include a dollar sign. */
2050
2051 struct internalvar *
2052 create_internalvar (const char *name)
2053 {
2054 struct internalvar *var;
2055
2056 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
2057 var->name = concat (name, (char *)NULL);
2058 var->kind = INTERNALVAR_VOID;
2059 var->next = internalvars;
2060 internalvars = var;
2061 return var;
2062 }
2063
2064 /* Create an internal variable with name NAME and register FUN as the
2065 function that value_of_internalvar uses to create a value whenever
2066 this variable is referenced. NAME should not normally include a
2067 dollar sign. DATA is passed uninterpreted to FUN when it is
2068 called. CLEANUP, if not NULL, is called when the internal variable
2069 is destroyed. It is passed DATA as its only argument. */
2070
struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Values are produced on demand via FUNCS rather than stored.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2083
2084 /* See documentation in value.h. */
2085
/* Compile a reference to internal variable VAR into agent expression
   EXPR/VALUE.  Return 0 if VAR cannot be compiled (not a lazy
   make-value variable, or its functions provide no compiler),
   non-zero on success.  */

int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2099
2100 /* Look up an internal variable with name NAME. NAME should not
2101 normally include a dollar sign.
2102
2103 If the specified internal variable does not exist,
2104 one is created, with a void value. */
2105
2106 struct internalvar *
2107 lookup_internalvar (const char *name)
2108 {
2109 struct internalvar *var;
2110
2111 var = lookup_only_internalvar (name);
2112 if (var)
2113 return var;
2114
2115 return create_internalvar (name);
2116 }
2117
2118 /* Return current value of internal variable VAR. For variables that
2119 are not inherently typed, use a value type appropriate for GDBARCH. */
2120
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a fresh value according to the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
						        var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2206
2207 int
2208 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2209 {
2210 if (var->kind == INTERNALVAR_INTEGER)
2211 {
2212 *result = var->u.integer.val;
2213 return 1;
2214 }
2215
2216 if (var->kind == INTERNALVAR_VALUE)
2217 {
2218 struct type *type = check_typedef (value_type (var->u.value));
2219
2220 if (TYPE_CODE (type) == TYPE_CODE_INT)
2221 {
2222 *result = value_as_long (var->u.value);
2223 return 1;
2224 }
2225 }
2226
2227 return 0;
2228 }
2229
2230 static int
2231 get_internalvar_function (struct internalvar *var,
2232 struct internal_function **result)
2233 {
2234 switch (var->kind)
2235 {
2236 case INTERNALVAR_FUNCTION:
2237 *result = var->u.fn.function;
2238 return 1;
2239
2240 default:
2241 return 0;
2242 }
2243 }
2244
2245 void
2246 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2247 int bitsize, struct value *newval)
2248 {
2249 gdb_byte *addr;
2250
2251 switch (var->kind)
2252 {
2253 case INTERNALVAR_VALUE:
2254 addr = value_contents_writeable (var->u.value);
2255
2256 if (bitsize)
2257 modify_field (value_type (var->u.value), addr + offset,
2258 value_as_long (newval), bitpos, bitsize);
2259 else
2260 memcpy (addr + offset, value_contents (newval),
2261 TYPE_LENGTH (value_type (newval)));
2262 break;
2263
2264 default:
2265 /* We can never get a component of any other kind. */
2266 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2267 }
2268 }
2269
/* Set internal variable VAR to the value VAL.  The resulting kind of
   VAR depends on VAL's type: void values yield a void variable,
   internal-function values copy the function reference, and anything
   else stores a modifiable, non-lazy copy of VAL.  Errors out (before
   modifying VAR) if VAR currently holds a canonical convenience
   function.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  /* Canonical convenience functions are registered by GDB itself and
     must not be clobbered by user assignment.  */
  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2321
2322 void
2323 set_internalvar_integer (struct internalvar *var, LONGEST l)
2324 {
2325 /* Clean up old contents. */
2326 clear_internalvar (var);
2327
2328 var->kind = INTERNALVAR_INTEGER;
2329 var->u.integer.type = NULL;
2330 var->u.integer.val = l;
2331 }
2332
2333 void
2334 set_internalvar_string (struct internalvar *var, const char *string)
2335 {
2336 /* Clean up old contents. */
2337 clear_internalvar (var);
2338
2339 var->kind = INTERNALVAR_STRING;
2340 var->u.string = xstrdup (string);
2341 }
2342
2343 static void
2344 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2345 {
2346 /* Clean up old contents. */
2347 clear_internalvar (var);
2348
2349 var->kind = INTERNALVAR_FUNCTION;
2350 var->u.fn.function = f;
2351 var->u.fn.canonical = 1;
2352 /* Variables installed here are always the canonical version. */
2353 }
2354
2355 void
2356 clear_internalvar (struct internalvar *var)
2357 {
2358 /* Clean up old contents. */
2359 switch (var->kind)
2360 {
2361 case INTERNALVAR_VALUE:
2362 value_free (var->u.value);
2363 break;
2364
2365 case INTERNALVAR_STRING:
2366 xfree (var->u.string);
2367 break;
2368
2369 case INTERNALVAR_MAKE_VALUE:
2370 if (var->u.make_value.functions->destroy != NULL)
2371 var->u.make_value.functions->destroy (var->u.make_value.data);
2372 break;
2373
2374 default:
2375 break;
2376 }
2377
2378 /* Reset to void kind. */
2379 var->kind = INTERNALVAR_VOID;
2380 }
2381
/* Return the name of internal variable VAR, without the leading "$"
   (show_convenience prints the "$" itself).  The string is owned by
   VAR; callers must not free it.  */

char *
internalvar_name (struct internalvar *var)
{
  return var->name;
}
2387
2388 static struct internal_function *
2389 create_internal_function (const char *name,
2390 internal_function_fn handler, void *cookie)
2391 {
2392 struct internal_function *ifn = XNEW (struct internal_function);
2393
2394 ifn->name = xstrdup (name);
2395 ifn->handler = handler;
2396 ifn->cookie = cookie;
2397 return ifn;
2398 }
2399
2400 char *
2401 value_internal_function_name (struct value *val)
2402 {
2403 struct internal_function *ifn;
2404 int result;
2405
2406 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2407 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2408 gdb_assert (result);
2409
2410 return ifn->name;
2411 }
2412
2413 struct value *
2414 call_internal_function (struct gdbarch *gdbarch,
2415 const struct language_defn *language,
2416 struct value *func, int argc, struct value **argv)
2417 {
2418 struct internal_function *ifn;
2419 int result;
2420
2421 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2422 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2423 gdb_assert (result);
2424
2425 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2426 }
2427
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  COMMAND and FROM_TTY are the
   standard command arguments; both are ignored.  */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  */
}
2437
2438 /* Clean up if an internal function's command is destroyed. */
2439 static void
2440 function_destroyer (struct cmd_list_element *self, void *ignore)
2441 {
2442 xfree ((char *) self->name);
2443 xfree ((char *) self->doc);
2444 }
2445
2446 /* Add a new internal function. NAME is the name of the function; DOC
2447 is a documentation string describing the function. HANDLER is
2448 called when the function is invoked. COOKIE is an arbitrary
2449 pointer which is passed to HANDLER and is intended for "user
2450 data". */
2451 void
2452 add_internal_function (const char *name, const char *doc,
2453 internal_function_fn handler, void *cookie)
2454 {
2455 struct cmd_list_element *cmd;
2456 struct internal_function *ifn;
2457 struct internalvar *var = lookup_internalvar (name);
2458
2459 ifn = create_internal_function (name, handler, cookie);
2460 set_internalvar_function (var, ifn);
2461
2462 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2463 &functionlist);
2464 cmd->destroyer = function_destroyer;
2465 }
2466
/* Update VALUE before discarding OBJFILE.  COPIED_TYPES is used to
   prevent cycles / duplicates.  Both the value's type and its
   enclosing type are replaced with objfile-independent copies if they
   currently live in OBJFILE.  */

void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
2482
2483 /* Likewise for internal variable VAR. */
2484
2485 static void
2486 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2487 htab_t copied_types)
2488 {
2489 switch (var->kind)
2490 {
2491 case INTERNALVAR_INTEGER:
2492 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2493 var->u.integer.type
2494 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2495 break;
2496
2497 case INTERNALVAR_VALUE:
2498 preserve_one_value (var->u.value, objfile, copied_types);
2499 break;
2500 }
2501 }
2502
2503 /* Update the internal variables and value history when OBJFILE is
2504 discarded; we must copy the types out of the objfile. New global types
2505 will be created for every convenience variable which currently points to
2506 this objfile's types, and the convenience variables will be adjusted to
2507 use the new global types. */
2508
2509 void
2510 preserve_values (struct objfile *objfile)
2511 {
2512 htab_t copied_types;
2513 struct value_history_chunk *cur;
2514 struct internalvar *var;
2515 int i;
2516
2517 /* Create the hash table. We allocate on the objfile's obstack, since
2518 it is soon to be deleted. */
2519 copied_types = create_copied_types_hash (objfile);
2520
2521 for (cur = value_history_chain; cur; cur = cur->next)
2522 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2523 if (cur->values[i])
2524 preserve_one_value (cur->values[i], objfile, copied_types);
2525
2526 for (var = internalvars; var; var = var->next)
2527 preserve_one_internalvar (var, objfile, copied_types);
2528
2529 preserve_ext_lang_values (objfile, copied_types);
2530
2531 htab_delete (copied_types);
2532 }
2533
/* The "show convenience" command: print every defined convenience
   variable together with its current value, or an explanatory message
   if none are defined.  Errors raised while computing an individual
   value are caught and printed inline, so one failing variable does
   not abort the listing.  IGNORE and FROM_TTY are the standard
   command arguments; both are unused.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {
      volatile struct gdb_exception ex;

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* value_of_internalvar can throw (e.g. a MAKE_VALUE hook may
	 fail); trap errors so the rest of the list still prints.  */
      TRY_CATCH (ex, RETURN_MASK_ERROR)
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      if (ex.reason < 0)
	fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2577 \f
2578 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2579
2580 struct value *
2581 value_of_xmethod (struct xmethod_worker *worker)
2582 {
2583 if (worker->value == NULL)
2584 {
2585 struct value *v;
2586
2587 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2588 v->lval = lval_xcallable;
2589 v->location.xm_worker = worker;
2590 v->modifiable = 0;
2591 worker->value = v;
2592 }
2593
2594 return worker->value;
2595 }
2596
2597 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2598
2599 struct value *
2600 call_xmethod (struct value *method, int argc, struct value **argv)
2601 {
2602 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2603 && method->lval == lval_xcallable && argc > 0);
2604
2605 return invoke_xmethod (method->location.xm_worker,
2606 argv[0], argv + 1, argc - 1);
2607 }
2608 \f
2609 /* Extract a value as a C number (either long or double).
2610 Knows how to convert fixed values to double, or
2611 floating values to long.
2612 Does not deallocate the value. */
2613
2614 LONGEST
2615 value_as_long (struct value *val)
2616 {
2617 /* This coerces arrays and functions, which is necessary (e.g.
2618 in disassemble_command). It also dereferences references, which
2619 I suspect is the most logical thing to do. */
2620 val = coerce_array (val);
2621 return unpack_long (value_type (val), value_contents (val));
2622 }
2623
2624 DOUBLEST
2625 value_as_double (struct value *val)
2626 {
2627 DOUBLEST foo;
2628 int inv;
2629
2630 foo = unpack_double (value_type (val), value_contents (val), &inv);
2631 if (inv)
2632 error (_("Invalid floating value found in program."));
2633 return foo;
2634 }
2635
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Give the architecture a chance to convert non-pointer values
     (e.g. plain integers) to addresses its own way.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise, simply unpack the value's bytes as an address.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2742 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.

   Errors out (does not return) for types that have no integer
   interpretation.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Peel the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* All integer-like codes: extract with the type's signedness.  */
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      return extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2800
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  Non-float types are routed
   through unpack_long and may error out there.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  CHECK_TYPEDEF (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Report the bad format through *INVP but still return a
	     usable value, per the contract above.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
2860
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}
2881
2882 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  The field's location may be recorded either as a direct
   address or as a physical symbol name to look up; if neither
   resolves, an optimized-out value is returned.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gives the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno);  */
      struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym, NULL);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2928
2929 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2930 You have to be careful here, since the size of the data area for the value
2931 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2932 than the old enclosing type, you have to allocate more space for the
2933 data. */
2934
2935 void
2936 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2937 {
2938 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2939 val->contents =
2940 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2941
2942 val->enclosing_type = new_encl_type;
2943 }
2944
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Three cases are handled: packed
   (bitfield) members, base-class subobjects, and plain data
   members.  */

struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;

  CHECK_TYPEDEF (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      int boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else
    {
      /* Plain old data member */
      offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   TYPE_LENGTH (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The result is a component of ARG1: share its location, register
     number and frame.  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3057
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Convenience wrapper around
   value_primitive_field with a zero offset and ARG1's own type.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
3067
3068 /* Return a non-virtual function as a value.
3069 F is the list of member functions which contains the desired method.
3070 J is an index into F which provides the desired method.
3071
3072 We only use the symbol for its address, so be happy with either a
3073 full symbol or a minimal symbol. */
3074
3075 struct value *
3076 value_fn_field (struct value **arg1p, struct fn_field *f,
3077 int j, struct type *type,
3078 int offset)
3079 {
3080 struct value *v;
3081 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3082 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3083 struct symbol *sym;
3084 struct bound_minimal_symbol msym;
3085
3086 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
3087 if (sym != NULL)
3088 {
3089 memset (&msym, 0, sizeof (msym));
3090 }
3091 else
3092 {
3093 gdb_assert (sym == NULL);
3094 msym = lookup_bound_minimal_symbol (physname);
3095 if (msym.minsym == NULL)
3096 return NULL;
3097 }
3098
3099 v = allocate_value (ftype);
3100 if (sym)
3101 {
3102 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3103 }
3104 else
3105 {
3106 /* The minimal symbol might point to a function descriptor;
3107 resolve it to the actual code address instead. */
3108 struct objfile *objfile = msym.objfile;
3109 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3110
3111 set_value_address (v,
3112 gdbarch_convert_from_func_ptr_addr
3113 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3114 }
3115
3116 if (arg1p)
3117 {
3118 if (type != value_type (*arg1p))
3119 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3120 value_addr (*arg1p)));
3121
3122 /* Move the `this' pointer according to the offset.
3123 VALUE_OFFSET (*arg1p) += offset; */
3124 }
3125
3126 return v;
3127 }
3128
3129 \f
3130
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and store the result in *RESULT.
   The bitfield starts at BITPOS bits and contains BITSIZE bits.  A
   BITSIZE of zero means "not actually a bitfield": the whole
   FIELD_TYPE is read instead.

   Extracting bits depends on endianness of the machine.  Compute the
   number of least significant bits to discard.  For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count.  For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension.  */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  int bytes_read;
  int read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  CHECK_TYPEDEF (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Test the sign bit (the MSB of the masked field) and, when
	     set, extend it through the high-order bits.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3196
3197 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3198 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3199 ORIGINAL_VALUE, which must not be NULL. See
3200 unpack_value_bits_as_long for more details. */
3201
3202 int
3203 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3204 int embedded_offset, int fieldno,
3205 const struct value *val, LONGEST *result)
3206 {
3207 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3208 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3209 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3210 int bit_offset;
3211
3212 gdb_assert (val != NULL);
3213
3214 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3215 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3216 || !value_bits_available (val, bit_offset, bitsize))
3217 return 0;
3218
3219 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3220 bitpos, bitsize);
3221 return 1;
3222 }
3223
3224 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3225 object at VALADDR. See unpack_bits_as_long for more details. */
3226
3227 LONGEST
3228 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3229 {
3230 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3231 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3232 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3233
3234 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3235 }
3236
3237 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3238 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3239 the contents in DEST_VAL, zero or sign extending if the type of
3240 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3241 VAL. If the VAL's contents required to extract the bitfield from
3242 are unavailable/optimized out, DEST_VAL is correspondingly
3243 marked unavailable/optimized out. */
3244
void
unpack_value_bitfield (struct value *dest_val,
		       int bitpos, int bitsize,
		       const gdb_byte *valaddr, int embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  LONGEST num;
  struct type *field_type = value_type (dest_val);

  /* First, unpack and sign extend the bitfield as if it was wholly
     available.  Invalid/unavailable bits are read as zero, but that's
     OK, as they'll end up marked below.  */
  byte_order = gdbarch_byte_order (get_type_arch (field_type));
  num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
			     bitpos, bitsize);
  store_signed_integer (value_contents_raw (dest_val),
			TYPE_LENGTH (field_type), byte_order, num);

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  On big-endian targets the BITSIZE significant bits land at
     the high end of DEST_VAL's buffer, so shift the destination bit
     offset accordingly; on little-endian they start at bit 0.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3276
3277 /* Return a new value with type TYPE, which is FIELDNO field of the
3278 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3279 of VAL. If the VAL's contents required to extract the bitfield
3280 from are unavailable/optimized out, the new value is
3281 correspondingly marked unavailable/optimized out. */
3282
3283 struct value *
3284 value_field_bitfield (struct type *type, int fieldno,
3285 const gdb_byte *valaddr,
3286 int embedded_offset, const struct value *val)
3287 {
3288 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3289 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3290 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3291
3292 unpack_value_bitfield (res_val, bitpos, bitsize,
3293 valaddr, embedded_offset, val);
3294
3295 return res_val;
3296 }
3297
3298 /* Modify the value of a bitfield. ADDR points to a block of memory in
3299 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3300 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3301 indicate which bits (in target bit order) comprise the bitfield.
3302 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3303 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3304
void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* MASK has the low BITSIZE bits set; requires 0 < BITSIZE per the
     contract above, else the shift count would be out of range.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  int bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
         we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %d bits."), bitsize);

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then merge in the (truncated) new
     value, and write the enclosing bytes back.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3349 \f
3350 /* Pack NUM into BUF using a target format of TYPE. */
3351
3352 void
3353 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3354 {
3355 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3356 int len;
3357
3358 type = check_typedef (type);
3359 len = TYPE_LENGTH (type);
3360
3361 switch (TYPE_CODE (type))
3362 {
3363 case TYPE_CODE_INT:
3364 case TYPE_CODE_CHAR:
3365 case TYPE_CODE_ENUM:
3366 case TYPE_CODE_FLAGS:
3367 case TYPE_CODE_BOOL:
3368 case TYPE_CODE_RANGE:
3369 case TYPE_CODE_MEMBERPTR:
3370 store_signed_integer (buf, len, byte_order, num);
3371 break;
3372
3373 case TYPE_CODE_REF:
3374 case TYPE_CODE_PTR:
3375 store_typed_address (buf, type, (CORE_ADDR) num);
3376 break;
3377
3378 default:
3379 error (_("Unexpected type (%d) encountered for integer constant."),
3380 TYPE_CODE (type));
3381 }
3382 }
3383
3384
3385 /* Pack NUM into BUF using a target format of TYPE. */
3386
3387 static void
3388 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3389 {
3390 int len;
3391 enum bfd_endian byte_order;
3392
3393 type = check_typedef (type);
3394 len = TYPE_LENGTH (type);
3395 byte_order = gdbarch_byte_order (get_type_arch (type));
3396
3397 switch (TYPE_CODE (type))
3398 {
3399 case TYPE_CODE_INT:
3400 case TYPE_CODE_CHAR:
3401 case TYPE_CODE_ENUM:
3402 case TYPE_CODE_FLAGS:
3403 case TYPE_CODE_BOOL:
3404 case TYPE_CODE_RANGE:
3405 case TYPE_CODE_MEMBERPTR:
3406 store_unsigned_integer (buf, len, byte_order, num);
3407 break;
3408
3409 case TYPE_CODE_REF:
3410 case TYPE_CODE_PTR:
3411 store_typed_address (buf, type, (CORE_ADDR) num);
3412 break;
3413
3414 default:
3415 error (_("Unexpected type (%d) encountered "
3416 "for unsigned integer constant."),
3417 TYPE_CODE (type));
3418 }
3419 }
3420
3421
3422 /* Convert C numbers into newly allocated values. */
3423
3424 struct value *
3425 value_from_longest (struct type *type, LONGEST num)
3426 {
3427 struct value *val = allocate_value (type);
3428
3429 pack_long (value_contents_raw (val), type, num);
3430 return val;
3431 }
3432
3433
3434 /* Convert C unsigned numbers into newly allocated values. */
3435
3436 struct value *
3437 value_from_ulongest (struct type *type, ULONGEST num)
3438 {
3439 struct value *val = allocate_value (type);
3440
3441 pack_unsigned_long (value_contents_raw (val), type, num);
3442
3443 return val;
3444 }
3445
3446
3447 /* Create a value representing a pointer of type TYPE to the address
3448 ADDR. */
3449
3450 struct value *
3451 value_from_pointer (struct type *type, CORE_ADDR addr)
3452 {
3453 struct value *val = allocate_value (type);
3454
3455 store_typed_address (value_contents_raw (val),
3456 check_typedef (type), addr);
3457 return val;
3458 }
3459
3460
3461 /* Create a value of type TYPE whose contents come from VALADDR, if it
3462 is non-null, and whose memory address (in the inferior) is
3463 ADDRESS. The type of the created value may differ from the passed
3464 type TYPE. Make sure to retrieve values new type after this call.
3465 Note that TYPE is not passed through resolve_dynamic_type; this is
3466 a special API intended for use only by Ada. */
3467
3468 struct value *
3469 value_from_contents_and_address_unresolved (struct type *type,
3470 const gdb_byte *valaddr,
3471 CORE_ADDR address)
3472 {
3473 struct value *v;
3474
3475 if (valaddr == NULL)
3476 v = allocate_value_lazy (type);
3477 else
3478 v = value_from_contents (type, valaddr);
3479 set_value_address (v, address);
3480 VALUE_LVAL (v) = lval_memory;
3481 return v;
3482 }
3483
3484 /* Create a value of type TYPE whose contents come from VALADDR, if it
3485 is non-null, and whose memory address (in the inferior) is
3486 ADDRESS. The type of the created value may differ from the passed
3487 type TYPE. Make sure to retrieve values new type after this call. */
3488
3489 struct value *
3490 value_from_contents_and_address (struct type *type,
3491 const gdb_byte *valaddr,
3492 CORE_ADDR address)
3493 {
3494 struct type *resolved_type = resolve_dynamic_type (type, address);
3495 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3496 struct value *v;
3497
3498 if (valaddr == NULL)
3499 v = allocate_value_lazy (resolved_type);
3500 else
3501 v = value_from_contents (resolved_type, valaddr);
3502 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3503 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3504 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3505 set_value_address (v, address);
3506 VALUE_LVAL (v) = lval_memory;
3507 return v;
3508 }
3509
3510 /* Create a value of type TYPE holding the contents CONTENTS.
3511 The new value is `not_lval'. */
3512
3513 struct value *
3514 value_from_contents (struct type *type, const gdb_byte *contents)
3515 {
3516 struct value *result;
3517
3518 result = allocate_value (type);
3519 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3520 return result;
3521 }
3522
3523 struct value *
3524 value_from_double (struct type *type, DOUBLEST num)
3525 {
3526 struct value *val = allocate_value (type);
3527 struct type *base_type = check_typedef (type);
3528 enum type_code code = TYPE_CODE (base_type);
3529
3530 if (code == TYPE_CODE_FLT)
3531 {
3532 store_typed_floating (value_contents_raw (val), base_type, num);
3533 }
3534 else
3535 error (_("Unexpected type encountered for floating constant."));
3536
3537 return val;
3538 }
3539
3540 struct value *
3541 value_from_decfloat (struct type *type, const gdb_byte *dec)
3542 {
3543 struct value *val = allocate_value (type);
3544
3545 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3546 return val;
3547 }
3548
3549 /* Extract a value from the history file. Input will be of the form
3550 $digits or $$digits. See block comment above 'write_dollar_variable'
3551 for details. */
3552
/* Extract a value from the history list.  H is of the form $digits or
   $$digits (see block comment above 'write_dollar_variable' for
   details).  On success, *ENDP is advanced past the parsed reference.
   Returns NULL if H does not look like a history reference.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int idx, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  The cast to unsigned char avoids
     undefined behavior: passing a negative plain char to the
     <ctype.h> classification functions is UB (CERT STR37-C).  */
  for (; isdigit ((unsigned char) h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha ((unsigned char) h[len]))
    return NULL;

  /* Now collect the index value.  "$$N" counts backwards from the
     end of the history list, so it maps to a negative index.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  idx = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  idx = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  idx = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  idx = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (idx);
}
3611
3612 struct value *
3613 coerce_ref_if_computed (const struct value *arg)
3614 {
3615 const struct lval_funcs *funcs;
3616
3617 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3618 return NULL;
3619
3620 if (value_lval_const (arg) != lval_computed)
3621 return NULL;
3622
3623 funcs = value_computed_funcs (arg);
3624 if (funcs->coerce_ref == NULL)
3625 return NULL;
3626
3627 return funcs->coerce_ref (arg);
3628 }
3629
3630 /* Look at value.h for description. */
3631
3632 struct value *
3633 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3634 struct type *original_type,
3635 struct value *original_value)
3636 {
3637 /* Re-adjust type. */
3638 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3639
3640 /* Add embedding info. */
3641 set_value_enclosing_type (value, enc_type);
3642 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3643
3644 /* We may be pointing to an object of some derived type. */
3645 return value_full_object (value, NULL, 0, 0, 0);
3646 }
3647
3648 struct value *
3649 coerce_ref (struct value *arg)
3650 {
3651 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3652 struct value *retval;
3653 struct type *enc_type;
3654
3655 retval = coerce_ref_if_computed (arg);
3656 if (retval)
3657 return retval;
3658
3659 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3660 return arg;
3661
3662 enc_type = check_typedef (value_enclosing_type (arg));
3663 enc_type = TYPE_TARGET_TYPE (enc_type);
3664
3665 retval = value_at_lazy (enc_type,
3666 unpack_pointer (value_type (arg),
3667 value_contents (arg)));
3668 enc_type = value_type (retval);
3669 return readjust_indirect_value_type (retval, enc_type,
3670 value_type_arg_tmp, arg);
3671 }
3672
3673 struct value *
3674 coerce_array (struct value *arg)
3675 {
3676 struct type *type;
3677
3678 arg = coerce_ref (arg);
3679 type = check_typedef (value_type (arg));
3680
3681 switch (TYPE_CODE (type))
3682 {
3683 case TYPE_CODE_ARRAY:
3684 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3685 arg = value_coerce_array (arg);
3686 break;
3687 case TYPE_CODE_FUNC:
3688 arg = value_coerce_function (arg);
3689 break;
3690 }
3691 return arg;
3692 }
3693 \f
3694
3695 /* Return the return value convention that will be used for the
3696 specified type. */
3697
3698 enum return_value_convention
3699 struct_return_convention (struct gdbarch *gdbarch,
3700 struct value *function, struct type *value_type)
3701 {
3702 enum type_code code = TYPE_CODE (value_type);
3703
3704 if (code == TYPE_CODE_ERROR)
3705 error (_("Function return type unknown."));
3706
3707 /* Probe the architecture for the return-value convention. */
3708 return gdbarch_return_value (gdbarch, function, value_type,
3709 NULL, NULL, NULL);
3710 }
3711
3712 /* Return true if the function returning the specified type is using
3713 the convention of returning structures in memory (passing in the
3714 address as a hidden first parameter). */
3715
3716 int
3717 using_struct_return (struct gdbarch *gdbarch,
3718 struct value *function, struct type *value_type)
3719 {
3720 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3721 /* A void return value is never in memory. See also corresponding
3722 code in "print_return_value". */
3723 return 0;
3724
3725 return (struct_return_convention (gdbarch, function, value_type)
3726 != RETURN_VALUE_REGISTER_CONVENTION);
3727 }
3728
3729 /* Set the initialized field in a value struct. */
3730
void
set_value_initialized (struct value *val, int status)
{
  /* STATUS is a flag (0/non-zero) recorded on the value; see
     value_initialized for the reader side.  */
  val->initialized = status;
}
3736
3737 /* Return the initialized field in a value struct. */
3738
int
value_initialized (struct value *val)
{
  /* Accessor for the flag set by set_value_initialized.  */
  return val->initialized;
}
3744
3745 /* Called only from the value_contents and value_contents_all()
3746 macros, if the current data for a variable needs to be loaded into
3747 value_contents(VAL). Fetches the data from the user's process, and
3748 clears the lazy flag to indicate that the data in the buffer is
3749 valid.
3750
3751 If the value is zero-length, we avoid calling read_memory, which
3752 would abort. We mark the value as fetched anyway -- all 0 bytes of
3753 it.
3754
3755 This function returns a value because it is used in the
3756 value_contents macro as part of an expression, where a void would
3757 not work. The value is ignored. */
3758
int
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
         per bitfield.  It would be even better to read only the containing
         word, but we have no way to record that just specific bits of a
         value have been fetched.  */
      struct type *type = check_typedef (value_type (val));
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Zero-length types: nothing to read, but the value is still
	 marked fetched (see function comment).  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   TYPE_LENGTH (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* Walk down the chain of lazy register values until we reach a
	 frame that actually supplies the register's contents.  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);

	  frame = frame_find_by_id (frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
						   regnum, type));

	  new_val = get_frame_register_value (frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from the next frame.
	     get_frame_register_value should never return a value with
	     the frame id pointing to FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   TYPE_LENGTH (type));

      /* Optional tracing of the fetch, enabled by "set debug frame".  */
      if (frame_debug)
	{
	  struct gdbarch *gdbarch;
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
  return 0;
}
3913
3914 /* Implementation of the convenience function $_isvoid. */
3915
3916 static struct value *
3917 isvoid_internal_fn (struct gdbarch *gdbarch,
3918 const struct language_defn *language,
3919 void *cookie, int argc, struct value **argv)
3920 {
3921 int ret;
3922
3923 if (argc != 1)
3924 error (_("You must provide one argument for $_isvoid."));
3925
3926 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3927
3928 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3929 }
3930
3931 void
3932 _initialize_values (void)
3933 {
3934 add_cmd ("convenience", no_class, show_convenience, _("\
3935 Debugger convenience (\"$foo\") variables and functions.\n\
3936 Convenience variables are created when you assign them values;\n\
3937 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3938 \n\
3939 A few convenience variables are given values automatically:\n\
3940 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3941 \"$__\" holds the contents of the last address examined with \"x\"."
3942 #ifdef HAVE_PYTHON
3943 "\n\n\
3944 Convenience functions are defined via the Python API."
3945 #endif
3946 ), &showlist);
3947 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3948
3949 add_cmd ("values", no_set_class, show_values, _("\
3950 Elements of value history around item number IDX (or last ten)."),
3951 &showlist);
3952
3953 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3954 Initialize a convenience variable if necessary.\n\
3955 init-if-undefined VARIABLE = EXPRESSION\n\
3956 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3957 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3958 VARIABLE is already initialized."));
3959
3960 add_prefix_cmd ("function", no_class, function_command, _("\
3961 Placeholder command for showing help on convenience functions."),
3962 &functionlist, "function ", 0, &cmdlist);
3963
3964 add_internal_function ("_isvoid", _("\
3965 Check whether an expression is void.\n\
3966 Usage: $_isvoid (expression)\n\
3967 Return 1 if the expression is void, zero otherwise."),
3968 isvoid_internal_fn, NULL);
3969 }