6620f9682a062377a4e850dd85e59b3ec555f606
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "exceptions.h"
39 #include "extension.h"
40 #include <ctype.h>
41 #include "tracepoint.h"
42 #include "cp-abi.h"
43 #include "user-regs.h"
44
45 /* Prototypes for exported functions. */
46
47 void _initialize_values (void);
48
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Invoked when the convenience function is called.  */
  internal_function_fn handler;

  /* User data for the handler.  Opaque to this file; presumably
     passed back to HANDLER on each call -- confirm at call sites.  */
  void *cookie;
};
63
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

/* Vector of ranges; used for the per-value `unavailable' and
   `optimized_out' tracking below.  */
DEF_VEC_O(range_s);
78
79 /* Returns true if the ranges defined by [offset1, offset1+len1) and
80 [offset2, offset2+len2) overlap. */
81
82 static int
83 ranges_overlap (int offset1, int len1,
84 int offset2, int len2)
85 {
86 ULONGEST h, l;
87
88 l = max (offset1, offset2);
89 h = min (offset1 + len1, offset2 + len2);
90 return (l < h);
91 }
92
/* Returns true if the first argument is strictly less than the
   second, useful for VEC_lower_bound.  We keep ranges sorted by
   offset and coalesce overlapping and contiguous ranges, so this just
   compares the starting offset.  */

static int
range_lessthan (const range_s *r1, const range_s *r2)
{
  return r1->offset < r2->offset;
}
103
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105 OFFSET+LENGTH). */
106
107 static int
108 ranges_contain (VEC(range_s) *ranges, int offset, int length)
109 {
110 range_s what;
111 int i;
112
113 what.offset = offset;
114 what.length = length;
115
116 /* We keep ranges sorted by offset and coalesce overlapping and
117 contiguous ranges, so to check if a range list contains a given
118 range, we can do a binary search for the position the given range
119 would be inserted if we only considered the starting OFFSET of
120 ranges. We call that position I. Since we also have LENGTH to
121 care for (this is a range afterall), we need to check if the
122 _previous_ range overlaps the I range. E.g.,
123
124 R
125 |---|
126 |---| |---| |------| ... |--|
127 0 1 2 N
128
129 I=1
130
131 In the case above, the binary search would return `I=1', meaning,
132 this OFFSET should be inserted at position 1, and the current
133 position 1 should be pushed further (and before 2). But, `0'
134 overlaps with R.
135
136 Then we need to check if the I range overlaps the I range itself.
137 E.g.,
138
139 R
140 |---|
141 |---| |---| |-------| ... |--|
142 0 1 2 N
143
144 I=1
145 */
146
147 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
148
149 if (i > 0)
150 {
151 struct range *bef = VEC_index (range_s, ranges, i - 1);
152
153 if (ranges_overlap (bef->offset, bef->length, offset, length))
154 return 1;
155 }
156
157 if (i < VEC_length (range_s, ranges))
158 {
159 struct range *r = VEC_index (range_s, ranges, i);
160
161 if (ranges_overlap (r->offset, r->length, offset, length))
162 return 1;
163 }
164
165 return 0;
166 }
167
168 static struct cmd_list_element *functionlist;
169
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released.  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in bytes.
     If lval == lval_memory, this is an offset to the address.  If
     lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the
     member embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
       full object, not just the limited portion apparent from the
       compile-time type.

     - If `type' has virtual base classes, then even printing `type'
       alone may require reaching outside the `type' portion of the
       object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in bytes.  The
     value_contents() macro takes `embedded_offset' into account, so
     most GDB code continues to see the `type' portion of the value,
     just as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in bytes from the full object to the pointed-to object
     -- that is, the value `embedded_offset' would have if we followed
     the pointer and fetched the complete object.  (I don't really see
     the point.  Why not just determine the run-time type when you
     indirect, and avoid the special case?  The contents don't matter
     until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
343
344 int
345 value_bits_available (const struct value *value, int offset, int length)
346 {
347 gdb_assert (!value->lazy);
348
349 return !ranges_contain (value->unavailable, offset, length);
350 }
351
352 int
353 value_bytes_available (const struct value *value, int offset, int length)
354 {
355 return value_bits_available (value,
356 offset * TARGET_CHAR_BIT,
357 length * TARGET_CHAR_BIT);
358 }
359
360 int
361 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
362 {
363 gdb_assert (!value->lazy);
364
365 return ranges_contain (value->optimized_out, bit_offset, bit_length);
366 }
367
368 int
369 value_entirely_available (struct value *value)
370 {
371 /* We can only tell whether the whole value is available when we try
372 to read it. */
373 if (value->lazy)
374 value_fetch_lazy (value);
375
376 if (VEC_empty (range_s, value->unavailable))
377 return 1;
378 return 0;
379 }
380
381 /* Returns true if VALUE is entirely covered by RANGES. If the value
382 is lazy, it'll be read now. Note that RANGE is a pointer to
383 pointer because reading the value might change *RANGE. */
384
385 static int
386 value_entirely_covered_by_range_vector (struct value *value,
387 VEC(range_s) **ranges)
388 {
389 /* We can only tell whether the whole value is optimized out /
390 unavailable when we try to read it. */
391 if (value->lazy)
392 value_fetch_lazy (value);
393
394 if (VEC_length (range_s, *ranges) == 1)
395 {
396 struct range *t = VEC_index (range_s, *ranges, 0);
397
398 if (t->offset == 0
399 && t->length == (TARGET_CHAR_BIT
400 * TYPE_LENGTH (value_enclosing_type (value))))
401 return 1;
402 }
403
404 return 0;
405 }
406
/* Return nonzero if every bit of VALUE's contents is marked
   unavailable; fetches VALUE first if it is lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->unavailable);
}

/* Return nonzero if every bit of VALUE's contents is marked optimized
   out; fetches VALUE first if it is lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, &value->optimized_out);
}
418
/* Insert into the vector pointed to by VECTORP the bit range starting of
   OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range afterall), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	 0         1      2            N

       I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

       I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	 |--|         |---|  |------| ... |--|
	 0            1      2            N

       I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	  R
	  |-...-|
	        |--|       |---|  |------| ... |--|
	        0          1      2            N

       I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	  R
	  |------------------------|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

       I=0

       or:

	    R
	    |-------|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

       I=1

  */

  i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, *vectorp, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	{
	  /* #1: merge the new range into the previous one, and make
	     I point at the merged range for the #5 pass below.  */
	  ULONGEST l = min (bef->offset, offset);
	  ULONGEST h = max (bef->offset + bef->length, offset + length);

	  bef->offset = l;
	  bef->length = h - l;
	  i--;
	}
      else if (offset == bef->offset + bef->length)
	{
	  /* #2: the new range starts exactly where the previous one
	     ends; just extend the previous range.  */
	  bef->length += length;
	  i--;
	}
      else
	{
	  /* #3 */
	  VEC_safe_insert (range_s, *vectorp, i, &newr);
	}
    }
  else
    {
      /* #4 */
      VEC_safe_insert (range_s, *vectorp, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, *vectorp))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, *vectorp, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, *vectorp, i, r); i++)
	if (r->offset <= t->offset + t->length)
	  {
	    /* R overlaps or is contiguous with T; widen T to absorb
	       it, and remember to delete R afterwards.  */
	    ULONGEST l, h;

	    l = min (t->offset, r->offset);
	    h = max (t->offset + t->length, r->offset + r->length);

	    t->offset = l;
	    t->length = h - l;

	    removed++;
	  }
	else
	  {
	    /* If we couldn't merge this one, we won't be able to
	       merge following ones either, since the ranges are
	       always sorted by OFFSET.  */
	    break;
	  }

      /* Delete the ranges absorbed into T in one block.  */
      if (removed != 0)
	VEC_block_remove (range_s, *vectorp, next, removed);
    }
}
586
/* Mark the bit range [OFFSET, OFFSET + LENGTH) of VALUE's contents as
   unavailable.  */

void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}

/* Byte-granularity counterpart of mark_value_bits_unavailable.  */

void
mark_value_bytes_unavailable (struct value *value, int offset, int length)
{
  mark_value_bits_unavailable (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
600
601 /* Find the first range in RANGES that overlaps the range defined by
602 OFFSET and LENGTH, starting at element POS in the RANGES vector,
603 Returns the index into RANGES where such overlapping range was
604 found, or -1 if none was found. */
605
606 static int
607 find_first_range_overlap (VEC(range_s) *ranges, int pos,
608 int offset, int length)
609 {
610 range_s *r;
611 int i;
612
613 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
614 if (ranges_overlap (r->offset, r->length, offset, length))
615 return i;
616
617 return -1;
618 }
619
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* The common sub-byte phase is what lets a single mask work for
     both buffers in the partial-byte comparisons below.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this first partial byte;
	     drop the mask bits beyond LENGTH_BITS.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
720
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
734
735 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
736 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
737 ranges starting at OFFSET2 bits. Return true if the ranges match
738 and fill in *L and *H with the overlapping window relative to
739 (both) OFFSET1 or OFFSET2. */
740
741 static int
742 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
743 struct ranges_and_idx *rp2,
744 int offset1, int offset2,
745 int length, ULONGEST *l, ULONGEST *h)
746 {
747 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
748 offset1, length);
749 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
750 offset2, length);
751
752 if (rp1->idx == -1 && rp2->idx == -1)
753 {
754 *l = length;
755 *h = length;
756 return 1;
757 }
758 else if (rp1->idx == -1 || rp2->idx == -1)
759 return 0;
760 else
761 {
762 range_s *r1, *r2;
763 ULONGEST l1, h1;
764 ULONGEST l2, h2;
765
766 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
767 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
768
769 /* Get the unavailable windows intersected by the incoming
770 ranges. The first and last ranges that overlap the argument
771 range may be wider than said incoming arguments ranges. */
772 l1 = max (offset1, r1->offset);
773 h1 = min (offset1 + length, r1->offset + r1->length);
774
775 l2 = max (offset2, r2->offset);
776 h2 = min (offset2 + length, offset2 + r2->length);
777
778 /* Make them relative to the respective start offsets, so we can
779 compare them for equality. */
780 l1 -= offset1;
781 h1 -= offset1;
782
783 l2 -= offset2;
784 h2 -= offset2;
785
786 /* Different ranges, no match. */
787 if (l1 != l2 || h1 != h2)
788 return 0;
789
790 *h = h1;
791 *l = l1;
792 return 1;
793 }
794 }
795
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the window in chunks: each iteration compares the valid
     contents up to the next invalid range, then skips past it.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      /* Advance past both the compared valid contents and the
	 matching invalid window.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
864
865 int
866 value_contents_eq (const struct value *val1, int offset1,
867 const struct value *val2, int offset2,
868 int length)
869 {
870 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
871 val2, offset2 * TARGET_CHAR_BIT,
872 length * TARGET_CHAR_BIT);
873 }
874
875 /* Prototypes for local functions. */
876
877 static void show_values (char *, int);
878
879 static void show_convenience (char *, int);
880
881
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* Link to the next (older) chunk; the chain head holds the most
     recent values per the comment above.  */
  struct value_history_chunk *next;
  /* Slots for VALUE_HISTORY_CHUNK consecutive history values.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */

/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static struct value *all_values;
908
/* Allocate a lazy value for type TYPE.  Its actual content is
   "lazily" allocated too: the content field of the return value is
   NULL; it will be allocated when it is fetched from the target.  */

struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  /* xzalloc zero-fills, so fields not assigned below start out 0.  */
  val = (struct value *) xzalloc (sizeof (struct value));
  val->contents = NULL;
  /* Push the new value onto the head of the all_values chain.  */
  val->next = all_values;
  all_values = val;
  val->type = type;
  val->enclosing_type = type;
  VALUE_LVAL (val) = not_lval;
  val->location.address = 0;
  VALUE_FRAME_ID (val) = null_frame_id;
  val->offset = 0;
  val->bitpos = 0;
  val->bitsize = 0;
  VALUE_REGNUM (val) = -1;
  val->lazy = 1;
  val->embedded_offset = 0;
  val->pointed_to_offset = 0;
  val->modifiable = 1;
  val->initialized = 1;  /* Default to initialized.  */

  /* Values start out on the all_values chain.  */
  val->reference_count = 1;

  return val;
}
950
951 /* Allocate the contents of VAL if it has not been allocated yet. */
952
953 static void
954 allocate_value_contents (struct value *val)
955 {
956 if (!val->contents)
957 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
958 }
959
960 /* Allocate a value and its contents for type TYPE. */
961
962 struct value *
963 allocate_value (struct type *type)
964 {
965 struct value *val = allocate_value_lazy (type);
966
967 allocate_value_contents (val);
968 val->lazy = 0;
969 return val;
970 }
971
972 /* Allocate a value that has the correct length
973 for COUNT repetitions of type TYPE. */
974
975 struct value *
976 allocate_repeat_value (struct type *type, int count)
977 {
978 int low_bound = current_language->string_lower_bound; /* ??? */
979 /* FIXME-type-allocation: need a way to free this type when we are
980 done with it. */
981 struct type *array_type
982 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
983
984 return allocate_value (array_type);
985 }
986
987 struct value *
988 allocate_computed_value (struct type *type,
989 const struct lval_funcs *funcs,
990 void *closure)
991 {
992 struct value *v = allocate_value_lazy (type);
993
994 VALUE_LVAL (v) = lval_computed;
995 v->location.computed.funcs = funcs;
996 v->location.computed.closure = closure;
997
998 return v;
999 }
1000
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *v = allocate_value_lazy (type);

  /* Flag every byte optimized out, and clear the lazy flag so no
     attempt is made to fetch contents from the target.  */
  mark_value_bytes_optimized_out (v, 0, TYPE_LENGTH (type));
  set_value_lazy (v, 0);

  return v;
}
1012
/* Accessor methods.  */

/* Next value on the all_values chain.  */

struct value *
value_next (struct value *value)
{
  return value->next;
}

/* Getter/setter for the value's (compile-time) type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Getter/setter for the byte offset within the lval (see the OFFSET
   field comment in struct value above).  */

int
value_offset (const struct value *value)
{
  return value->offset;
}
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}

/* Getter/setter for the bitfield start position.  */

int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}

/* Getter/setter for the bitfield width in bits.  */

int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}

/* The containing value, for bitfields.  */

struct value *
value_parent (struct value *value)
{
  return value->parent;
}
1070
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  value->parent = parent;
  /* Take the new reference before releasing the old one, so the
     sequence is safe even if PARENT == OLD.  */
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1083
1084 gdb_byte *
1085 value_contents_raw (struct value *value)
1086 {
1087 allocate_value_contents (value);
1088 return value->contents + value->embedded_offset;
1089 }
1090
/* Return a writable pointer to the start of VALUE's entire
   (enclosing) contents buffer, allocating it if necessary.  */
gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}
1097
/* Return the type of the whole object VALUE is embedded in.  */

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1103
1104 /* Look at value.h for description. */
1105
struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  /* REAL_TYPE_FOUND is optional; when given, tell the caller whether
     a more specific type than the declared one was substituted.  */
  if (real_type_found)
    *real_type_found = 0;
  result = value_type (value);
  /* Only try to refine the type when the user enabled "set print
     object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-pointer/reference types, the enclosing type is the
	     best "actual" type we have.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1147
/* Raise the standard "value has been optimized out" error.  */
void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}
1153
/* Error out if any part of VALUE is marked optimized out.  Registers
   get a more specific message than other lvalues.  */
static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}
1165
/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is marked
   unavailable.  */
static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1172
/* Return VALUE's full contents for printing, fetching it first if it
   is still lazy.  Unlike value_contents_all, this does not error out
   on optimized-out or unavailable bytes.  */
const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}
1180
/* Const variant of value_contents_for_printing; VALUE must already
   have been fetched (asserted, since we cannot un-lazy a const
   value).  */
const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}
1187
/* Return VALUE's full contents, erroring out if any part is
   optimized out or unavailable.  */
const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1196
1197 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1198 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1199
static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect this source range with the window of interest.  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      /* Only record the intersection if it is non-empty, rebased to
	 the destination's bit offset.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1221
1222 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1223 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1224
static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  /* Both kinds of metadata ranges are carried over: unavailability
     and optimized-out-ness.  */
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1237
1238 /* Copy LENGTH bytes of SRC value's (all) contents
1239 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1240 contents, starting at DST_OFFSET. If unavailable contents are
1241 being copied from SRC, the corresponding DST contents are marked
1242 unavailable accordingly. Neither DST nor SRC may be lazy
1243 values.
1244
1245 It is assumed the contents of DST in the [DST_OFFSET,
1246 DST_OFFSET+LENGTH) range are wholly available. */
1247
1248 void
1249 value_contents_copy_raw (struct value *dst, int dst_offset,
1250 struct value *src, int src_offset, int length)
1251 {
1252 range_s *r;
1253 int i;
1254 int src_bit_offset, dst_bit_offset, bit_length;
1255
1256 /* A lazy DST would make that this copy operation useless, since as
1257 soon as DST's contents were un-lazied (by a later value_contents
1258 call, say), the contents would be overwritten. A lazy SRC would
1259 mean we'd be copying garbage. */
1260 gdb_assert (!dst->lazy && !src->lazy);
1261
1262 /* The overwritten DST range gets unavailability ORed in, not
1263 replaced. Make sure to remember to implement replacing if it
1264 turns out actually necessary. */
1265 gdb_assert (value_bytes_available (dst, dst_offset, length));
1266 gdb_assert (!value_bits_any_optimized_out (dst,
1267 TARGET_CHAR_BIT * dst_offset,
1268 TARGET_CHAR_BIT * length));
1269
1270 /* Copy the data. */
1271 memcpy (value_contents_all_raw (dst) + dst_offset,
1272 value_contents_all_raw (src) + src_offset,
1273 length);
1274
1275 /* Copy the meta-data, adjusted. */
1276 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1277 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1278 bit_length = length * TARGET_CHAR_BIT;
1279
1280 value_ranges_copy_adjusted (dst, dst_bit_offset,
1281 src, src_bit_offset,
1282 bit_length);
1283 }
1284
1285 /* Copy LENGTH bytes of SRC value's (all) contents
1286 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1287 (all) contents, starting at DST_OFFSET. If unavailable contents
1288 are being copied from SRC, the corresponding DST contents are
1289 marked unavailable accordingly. DST must not be lazy. If SRC is
1290 lazy, it will be fetched now.
1291
1292 It is assumed the contents of DST in the [DST_OFFSET,
1293 DST_OFFSET+LENGTH) range are wholly available. */
1294
/* Like value_contents_copy_raw, but DST alone must be non-lazy; a
   lazy SRC is fetched here first.  */

void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC before delegating; the raw variant asserts that
     neither side is lazy.  */
  if (value_lazy (src))
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1304
/* Return non-zero if VALUE's contents have not yet been fetched.  */
int
value_lazy (struct value *value)
{
  return value->lazy;
}
1310
/* Set VALUE's laziness flag to VAL.  */
void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}
1316
/* Return VALUE's stack flag.  */
int
value_stack (struct value *value)
{
  return value->stack;
}
1322
/* Set VALUE's stack flag to VAL.  */
void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1328
/* Return VALUE's contents (at its embedded offset), erroring out if
   any part is optimized out or unavailable.  Fetches lazy values.  */
const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1337
1338 gdb_byte *
1339 value_contents_writeable (struct value *value)
1340 {
1341 if (value->lazy)
1342 value_fetch_lazy (value);
1343 return value_contents_raw (value);
1344 }
1345
/* Return non-zero if any part of VALUE is optimized out.  */
int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    value_fetch_lazy (value);

  return !VEC_empty (range_s, value->optimized_out);
}
1356
1357 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1358 the following LENGTH bytes. */
1359
1360 void
1361 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1362 {
1363 mark_value_bits_optimized_out (value,
1364 offset * TARGET_CHAR_BIT,
1365 length * TARGET_CHAR_BIT);
1366 }
1367
1368 /* See value.h. */
1369
/* Record [OFFSET, OFFSET+LENGTH) bits of VALUE as optimized out.  */
void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1375
/* Return non-zero if bits [OFFSET, OFFSET+LENGTH) of VALUE come from
   a synthetic pointer.  Only computed lvalues whose funcs provide
   check_synthetic_pointer can answer yes.  */
int
value_bits_synthetic_pointer (const struct value *value,
			      int offset, int length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1387
/* Return the offset of VALUE's declared type within its enclosing
   type's contents.  */
int
value_embedded_offset (struct value *value)
{
  return value->embedded_offset;
}
1393
/* Set VALUE's embedded offset to VAL.  */
void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}
1399
/* Return VALUE's pointed-to offset.  */
int
value_pointed_to_offset (struct value *value)
{
  return value->pointed_to_offset;
}
1405
/* Set VALUE's pointed-to offset to VAL.  */
void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}
1411
/* Return the lval_funcs of computed lvalue V (asserted to be
   lval_computed).  */
const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}
1419
/* Return the closure of computed lvalue V (asserted to be
   lval_computed).  */
void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}
1427
/* Return a writable pointer to VALUE's lval kind; used by the
   VALUE_LVAL macro.  Deprecated direct-access hack.  */
enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}
1433
/* Const accessor for VALUE's lval kind.  */
enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1439
/* Return the address VALUE describes: 0 for internalvar-related and
   xcallable values, the parent's address plus our offset when VALUE
   has a parent, or the location address plus offset otherwise.  */
CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  else
    return value->location.address + value->offset;
}
1452
/* Like value_address, but without adding the offset or following the
   parent chain.  */
CORE_ADDR
value_raw_address (struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  return value->location.address;
}
1462
/* Set VALUE's location address to ADDR.  Not meaningful (and
   asserted against) for internalvar and xcallable values.  */
void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1471
/* Writable access to VALUE's internalvar location; used by the
   VALUE_INTERNALVAR macro.  Deprecated direct-access hack.  */
struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}
1477
/* Writable access to VALUE's frame id; used by the VALUE_FRAME_ID
   macro.  Deprecated direct-access hack.  */
struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}
1483
/* Writable access to VALUE's register number; used by the
   VALUE_REGNUM macro.  Deprecated direct-access hack.  */
short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}
1489
/* Return non-zero if VALUE may be modified (cleared e.g. for value
   history entries, see record_latest_value).  */
int
deprecated_value_modifiable (struct value *value)
{
  return value->modifiable;
}
1495 \f
1496 /* Return a mark in the value chain. All values allocated after the
1497 mark is obtained (except for those released) are subject to being freed
1498 if a subsequent value_free_to_mark is passed the mark. */
struct value *
value_mark (void)
{
  /* The current head of the chain is the mark; everything allocated
     later is chained in front of it.  */
  return all_values;
}
1504
1505 /* Take a reference to VAL. VAL will not be deallocated until all
1506 references are released. */
1507
void
value_incref (struct value *val)
{
  val->reference_count++;
}
1513
1514 /* Release a reference to VAL, which was acquired with value_incref.
1515 This function is also called to deallocate values from the value
1516 chain. */
1517
1518 void
1519 value_free (struct value *val)
1520 {
1521 if (val)
1522 {
1523 gdb_assert (val->reference_count > 0);
1524 val->reference_count--;
1525 if (val->reference_count > 0)
1526 return;
1527
1528 /* If there's an associated parent value, drop our reference to
1529 it. */
1530 if (val->parent != NULL)
1531 value_free (val->parent);
1532
1533 if (VALUE_LVAL (val) == lval_computed)
1534 {
1535 const struct lval_funcs *funcs = val->location.computed.funcs;
1536
1537 if (funcs->free_closure)
1538 funcs->free_closure (val);
1539 }
1540 else if (VALUE_LVAL (val) == lval_xcallable)
1541 free_xmethod_worker (val->location.xm_worker);
1542
1543 xfree (val->contents);
1544 VEC_free (range_s, val->unavailable);
1545 }
1546 xfree (val);
1547 }
1548
1549 /* Free all values allocated since MARK was obtained by value_mark
1550 (except for those released). */
void
value_free_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Walk from the head of the chain down to (but not including)
     MARK, dropping the chain's reference to each value.  */
  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }
  /* VAL is now MARK (or NULL if MARK was not found); it becomes the
     new chain head.  */
  all_values = val;
}
1565
1566 /* Free all the values that have been allocated (except for those released).
1567 Call after each command, successful or not.
1568 In practice this is called before each command, which is sufficient. */
1569
1570 void
1571 free_all_values (void)
1572 {
1573 struct value *val;
1574 struct value *next;
1575
1576 for (val = all_values; val; val = next)
1577 {
1578 next = val->next;
1579 val->released = 1;
1580 value_free (val);
1581 }
1582
1583 all_values = 0;
1584 }
1585
1586 /* Frees all the elements in a chain of values. */
1587
1588 void
1589 free_value_chain (struct value *v)
1590 {
1591 struct value *next;
1592
1593 for (; v; v = next)
1594 {
1595 next = value_next (v);
1596 value_free (v);
1597 }
1598 }
1599
1600 /* Remove VAL from the chain all_values
1601 so it will not be freed automatically. */
1602
1603 void
1604 release_value (struct value *val)
1605 {
1606 struct value *v;
1607
1608 if (all_values == val)
1609 {
1610 all_values = val->next;
1611 val->next = NULL;
1612 val->released = 1;
1613 return;
1614 }
1615
1616 for (v = all_values; v; v = v->next)
1617 {
1618 if (v->next == val)
1619 {
1620 v->next = val->next;
1621 val->next = NULL;
1622 val->released = 1;
1623 break;
1624 }
1625 }
1626 }
1627
1628 /* If the value is not already released, release it.
1629 If the value is already released, increment its reference count.
1630 That is, this function ensures that the value is released from the
1631 value chain and that the caller owns a reference to it. */
1632
1633 void
1634 release_value_or_incref (struct value *val)
1635 {
1636 if (val->released)
1637 value_incref (val);
1638 else
1639 release_value (val);
1640 }
1641
1642 /* Release all values up to mark */
struct value *
value_release_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* VAL keeps the old head of the chain; NEXT scans forward looking
     for the element whose successor is MARK.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  /* Detach everything before MARK from the chain and return
	     it as its own chain, headed by the old all_values.  */
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  /* MARK was not found (e.g. NULL mark): release and return the
     entire chain.  */
  all_values = 0;
  return val;
}
1662
1663 /* Return a copy of the value ARG.
1664 It contains the same contents, for same memory address,
1665 but it's a different block of storage. */
1666
struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Preserve laziness: a lazy source yields a lazy copy with no
     contents buffer.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      /* Copy the whole enclosing object's bytes, not just the
	 declared type's.  */
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* Takes a new reference to the parent, if any.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Computed lvalues may need a per-value deep copy of their
	 closure.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1707
1708 /* Return a version of ARG that is non-lvalue. */
1709
struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      /* value_contents_all errors out on optimized-out/unavailable
	 contents, and fetches ARG if it is lazy.  */
      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  /* Already a non-lvalue; return it as-is.  */
  return arg;
}
1727
/* Make COMPONENT's location the same as WHOLE's, adjusting the lval
   kind (components of internal variables get their own kind) and
   copying the computed-lvalue closure when the funcs provide a
   copier.  xcallable wholes are not supported.  */
void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }
}
1748
1749 \f
1750 /* Access to the value history. */
1751
1752 /* Record a new value in the value history.
1753 Returns the absolute history index of the entry. */
1754
int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* Starting a new chunk: allocate it zeroed and push it on the
	 front of the chunk list.  */
      struct value_history_chunk *new
	= (struct value_history_chunk *)

      xmalloc (sizeof (struct value_history_chunk));
      memset (new->values, 0, sizeof new->values);
      new->next = value_history_chain;
      value_history_chain = new;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1798
1799 /* Return a copy of the value in the history with sequence number NUM. */
1800
struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* NUM <= 0 counts backwards from the most recent entry ($$, $$1,
     ...); convert to an absolute index.  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* Chunks are chained newest-first; skip forward to the chunk
     containing ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1835
/* Implement "show values": print ten history entries, either the
   last ten, the ten around a given number, or (with "+") the ten
   following the previous invocation.  */
static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Remembered across calls so that "show values +" continues where
     the previous listing stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	/* Start five entries before <exp> so it appears mid-window.  */
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1882 \f
1883 /* Internal variables. These are variables within the debugger
1884 that hold values assigned by debugger commands.
1885 The user refers to them with a '$' prefix
1886 that does not appear in the variable names stored internally. */
1887
struct internalvar
{
  /* Next variable on the singly-linked list headed by
     `internalvars'.  */
  struct internalvar *next;
  /* Variable name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind
  {
    /* The internal variable is empty.  */
    INTERNALVAR_VOID,

    /* The value of the internal variable is provided directly as
       a GDB value object.  */
    INTERNALVAR_VALUE,

    /* A fresh value is computed via a call-back routine on every
       access to the internal variable.  */
    INTERNALVAR_MAKE_VALUE,

    /* The internal variable holds a GDB internal convenience function.  */
    INTERNALVAR_FUNCTION,

    /* The variable holds an integer value.  */
    INTERNALVAR_INTEGER,

    /* The variable holds a GDB-provided string.  */
    INTERNALVAR_STRING,

  } kind;

  union internalvar_data
  {
    /* A value object used with INTERNALVAR_VALUE.  */
    struct value *value;

    /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
    struct
    {
      /* The functions to call.  */
      const struct internalvar_funcs *functions;

      /* The function's user-data.  */
      void *data;
    } make_value;

    /* The internal function used with INTERNALVAR_FUNCTION.  */
    struct
    {
      struct internal_function *function;
      /* True if this is the canonical name for the function.  */
      int canonical;
    } fn;

    /* An integer value used with INTERNALVAR_INTEGER.  */
    struct
    {
      /* If type is non-NULL, it will be used as the type to generate
	 a value for this internal variable.  If type is NULL, a default
	 integer type for the architecture is used.  */
      struct type *type;
      LONGEST val;
    } integer;

    /* A string value used with INTERNALVAR_STRING.  */
    char *string;
  } u;
};

/* Head of the list of all defined internal variables.  */
static struct internalvar *internalvars;
1960
1961 /* If the variable does not already exist create it and give it the
1962 value given. If no value is given then the default is zero. */
1963 static void
1964 init_if_undefined_command (char* args, int from_tty)
1965 {
1966 struct internalvar* intvar;
1967
1968 /* Parse the expression - this is taken from set_command(). */
1969 struct expression *expr = parse_expression (args);
1970 register struct cleanup *old_chain =
1971 make_cleanup (free_current_contents, &expr);
1972
1973 /* Validate the expression.
1974 Was the expression an assignment?
1975 Or even an expression at all? */
1976 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1977 error (_("Init-if-undefined requires an assignment expression."));
1978
1979 /* Extract the variable from the parsed expression.
1980 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1981 if (expr->elts[1].opcode != OP_INTERNALVAR)
1982 error (_("The first parameter to init-if-undefined "
1983 "should be a GDB variable."));
1984 intvar = expr->elts[2].internalvar;
1985
1986 /* Only evaluate the expression if the lvalue is void.
1987 This may still fail if the expresssion is invalid. */
1988 if (intvar->kind == INTERNALVAR_VOID)
1989 evaluate_expression (expr);
1990
1991 do_cleanups (old_chain);
1992 }
1993
1994
1995 /* Look up an internal variable with name NAME. NAME should not
1996 normally include a dollar sign.
1997
1998 If the specified internal variable does not exist,
1999 the return value is NULL. */
2000
2001 struct internalvar *
2002 lookup_only_internalvar (const char *name)
2003 {
2004 struct internalvar *var;
2005
2006 for (var = internalvars; var; var = var->next)
2007 if (strcmp (var->name, name) == 0)
2008 return var;
2009
2010 return NULL;
2011 }
2012
2013 /* Complete NAME by comparing it to the names of internal variables.
2014 Returns a vector of newly allocated strings, or NULL if no matches
2015 were found. */
2016
2017 VEC (char_ptr) *
2018 complete_internalvar (const char *name)
2019 {
2020 VEC (char_ptr) *result = NULL;
2021 struct internalvar *var;
2022 int len;
2023
2024 len = strlen (name);
2025
2026 for (var = internalvars; var; var = var->next)
2027 if (strncmp (var->name, name, len) == 0)
2028 {
2029 char *r = xstrdup (var->name);
2030
2031 VEC_safe_push (char_ptr, result, r);
2032 }
2033
2034 return result;
2035 }
2036
2037 /* Create an internal variable with name NAME and with a void value.
2038 NAME should not normally include a dollar sign. */
2039
struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var;

  var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
  /* Heap-allocate a copy of NAME; the variable owns it.  */
  var->name = concat (name, (char *)NULL);
  var->kind = INTERNALVAR_VOID;
  /* Push onto the front of the global list.  */
  var->next = internalvars;
  internalvars = var;
  return var;
}
2052
2053 /* Create an internal variable with name NAME and register FUN as the
2054 function that value_of_internalvar uses to create a value whenever
2055 this variable is referenced. NAME should not normally include a
2056 dollar sign. DATA is passed uninterpreted to FUN when it is
2057 called. CLEANUP, if not NULL, is called when the internal variable
2058 is destroyed. It is passed DATA as its only argument. */
2059
struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Each read of the variable will go through FUNCS->make_value.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2072
2073 /* See documentation in value.h. */
2074
/* Compile a reference to internal variable VAR into agent expression
   EXPR/VALUE.  Returns 0 when VAR cannot be compiled (not a
   make-value variable, or its funcs provide no compile_to_ax hook),
   1 on success.  */
int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2088
2089 /* Look up an internal variable with name NAME. NAME should not
2090 normally include a dollar sign.
2091
2092 If the specified internal variable does not exist,
2093 one is created, with a void value. */
2094
2095 struct internalvar *
2096 lookup_internalvar (const char *name)
2097 {
2098 struct internalvar *var;
2099
2100 var = lookup_only_internalvar (name);
2101 if (var)
2102 return var;
2103
2104 return create_internalvar (name);
2105 }
2106
2107 /* Return current value of internal variable VAR. For variables that
2108 are not inherently typed, use a value type appropriate for GDBARCH. */
2109
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	/* Unknown trace state variable value: report void.  */
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Materialize a value appropriate for the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy so the caller cannot mutate the stored value
	 behind our back.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2195
2196 int
2197 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2198 {
2199 if (var->kind == INTERNALVAR_INTEGER)
2200 {
2201 *result = var->u.integer.val;
2202 return 1;
2203 }
2204
2205 if (var->kind == INTERNALVAR_VALUE)
2206 {
2207 struct type *type = check_typedef (value_type (var->u.value));
2208
2209 if (TYPE_CODE (type) == TYPE_CODE_INT)
2210 {
2211 *result = value_as_long (var->u.value);
2212 return 1;
2213 }
2214 }
2215
2216 return 0;
2217 }
2218
2219 static int
2220 get_internalvar_function (struct internalvar *var,
2221 struct internal_function **result)
2222 {
2223 switch (var->kind)
2224 {
2225 case INTERNALVAR_FUNCTION:
2226 *result = var->u.fn.function;
2227 return 1;
2228
2229 default:
2230 return 0;
2231 }
2232 }
2233
/* Write NEWVAL into a component of internal variable VAR, at byte
   OFFSET.  When BITSIZE is non-zero the target is a bitfield at
   BITPOS/BITSIZE; otherwise NEWVAL's whole contents are copied.
   Only INTERNALVAR_VALUE variables can have components.  */
void
set_internalvar_component (struct internalvar *var, int offset, int bitpos,
			   int bitsize, struct value *newval)
{
  gdb_byte *addr;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);

      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2258
/* Set internal variable VAR to hold (a copy of) VAL.  Errors out if
   VAR is a canonical convenience function, which may not be
   overwritten.  Once the old contents have been discarded, this
   function must not raise errors, to avoid leaking the new data.  */
void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents. */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical. */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed. */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values. From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory. */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents. */
  clear_internalvar (var);

  /* Switch over. */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error(). */
}
2310
2311 void
2312 set_internalvar_integer (struct internalvar *var, LONGEST l)
2313 {
2314 /* Clean up old contents. */
2315 clear_internalvar (var);
2316
2317 var->kind = INTERNALVAR_INTEGER;
2318 var->u.integer.type = NULL;
2319 var->u.integer.val = l;
2320 }
2321
2322 void
2323 set_internalvar_string (struct internalvar *var, const char *string)
2324 {
2325 /* Clean up old contents. */
2326 clear_internalvar (var);
2327
2328 var->kind = INTERNALVAR_STRING;
2329 var->u.string = xstrdup (string);
2330 }
2331
2332 static void
2333 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2334 {
2335 /* Clean up old contents. */
2336 clear_internalvar (var);
2337
2338 var->kind = INTERNALVAR_FUNCTION;
2339 var->u.fn.function = f;
2340 var->u.fn.canonical = 1;
2341 /* Variables installed here are always the canonical version. */
2342 }
2343
2344 void
2345 clear_internalvar (struct internalvar *var)
2346 {
2347 /* Clean up old contents. */
2348 switch (var->kind)
2349 {
2350 case INTERNALVAR_VALUE:
2351 value_free (var->u.value);
2352 break;
2353
2354 case INTERNALVAR_STRING:
2355 xfree (var->u.string);
2356 break;
2357
2358 case INTERNALVAR_MAKE_VALUE:
2359 if (var->u.make_value.functions->destroy != NULL)
2360 var->u.make_value.functions->destroy (var->u.make_value.data);
2361 break;
2362
2363 default:
2364 break;
2365 }
2366
2367 /* Reset to void kind. */
2368 var->kind = INTERNALVAR_VOID;
2369 }
2370
/* Return the name of internal variable VAR.  The string is owned by
   VAR; the caller must not free it.  */
char *
internalvar_name (struct internalvar *var)
{
  return var->name;
}
2376
2377 static struct internal_function *
2378 create_internal_function (const char *name,
2379 internal_function_fn handler, void *cookie)
2380 {
2381 struct internal_function *ifn = XNEW (struct internal_function);
2382
2383 ifn->name = xstrdup (name);
2384 ifn->handler = handler;
2385 ifn->cookie = cookie;
2386 return ifn;
2387 }
2388
2389 char *
2390 value_internal_function_name (struct value *val)
2391 {
2392 struct internal_function *ifn;
2393 int result;
2394
2395 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2396 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2397 gdb_assert (result);
2398
2399 return ifn->name;
2400 }
2401
2402 struct value *
2403 call_internal_function (struct gdbarch *gdbarch,
2404 const struct language_defn *language,
2405 struct value *func, int argc, struct value **argv)
2406 {
2407 struct internal_function *ifn;
2408 int result;
2409
2410 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2411 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2412 gdb_assert (result);
2413
2414 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2415 }
2416
/* The 'function' command. This does nothing -- it is just a
   placeholder to let "help function NAME" work. This is also used as
   the implementation of the sub-command that is created when
   registering an internal function. */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  COMMAND and FROM_TTY are deliberately ignored. */
}
2426
2427 /* Clean up if an internal function's command is destroyed. */
2428 static void
2429 function_destroyer (struct cmd_list_element *self, void *ignore)
2430 {
2431 xfree ((char *) self->name);
2432 xfree ((char *) self->doc);
2433 }
2434
2435 /* Add a new internal function. NAME is the name of the function; DOC
2436 is a documentation string describing the function. HANDLER is
2437 called when the function is invoked. COOKIE is an arbitrary
2438 pointer which is passed to HANDLER and is intended for "user
2439 data". */
2440 void
2441 add_internal_function (const char *name, const char *doc,
2442 internal_function_fn handler, void *cookie)
2443 {
2444 struct cmd_list_element *cmd;
2445 struct internal_function *ifn;
2446 struct internalvar *var = lookup_internalvar (name);
2447
2448 ifn = create_internal_function (name, handler, cookie);
2449 set_internalvar_function (var, ifn);
2450
2451 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2452 &functionlist);
2453 cmd->destroyer = function_destroyer;
2454 }
2455
2456 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2457 prevent cycles / duplicates. */
2458
2459 void
2460 preserve_one_value (struct value *value, struct objfile *objfile,
2461 htab_t copied_types)
2462 {
2463 if (TYPE_OBJFILE (value->type) == objfile)
2464 value->type = copy_type_recursive (objfile, value->type, copied_types);
2465
2466 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2467 value->enclosing_type = copy_type_recursive (objfile,
2468 value->enclosing_type,
2469 copied_types);
2470 }
2471
2472 /* Likewise for internal variable VAR. */
2473
2474 static void
2475 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2476 htab_t copied_types)
2477 {
2478 switch (var->kind)
2479 {
2480 case INTERNALVAR_INTEGER:
2481 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2482 var->u.integer.type
2483 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2484 break;
2485
2486 case INTERNALVAR_VALUE:
2487 preserve_one_value (var->u.value, objfile, copied_types);
2488 break;
2489 }
2490 }
2491
2492 /* Update the internal variables and value history when OBJFILE is
2493 discarded; we must copy the types out of the objfile. New global types
2494 will be created for every convenience variable which currently points to
2495 this objfile's types, and the convenience variables will be adjusted to
2496 use the new global types. */
2497
2498 void
2499 preserve_values (struct objfile *objfile)
2500 {
2501 htab_t copied_types;
2502 struct value_history_chunk *cur;
2503 struct internalvar *var;
2504 int i;
2505
2506 /* Create the hash table. We allocate on the objfile's obstack, since
2507 it is soon to be deleted. */
2508 copied_types = create_copied_types_hash (objfile);
2509
2510 for (cur = value_history_chain; cur; cur = cur->next)
2511 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2512 if (cur->values[i])
2513 preserve_one_value (cur->values[i], objfile, copied_types);
2514
2515 for (var = internalvars; var; var = var->next)
2516 preserve_one_internalvar (var, objfile, copied_types);
2517
2518 preserve_ext_lang_values (objfile, copied_types);
2519
2520 htab_delete (copied_types);
2521 }
2522
/* Implementation of the "show convenience" command: print each defined
   convenience variable with its current value, or a hint message if
   none are defined.  Errors while evaluating one variable are caught
   and printed inline so the remaining variables still appear.  */
static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {
      volatile struct gdb_exception ex;

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating the variable may throw (e.g. a make_value hook can
	 fail); catch so one bad variable doesn't abort the listing.  */
      TRY_CATCH (ex, RETURN_MASK_ERROR)
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      if (ex.reason < 0)
	fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist). */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2566 \f
2567 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2568
2569 struct value *
2570 value_of_xmethod (struct xmethod_worker *worker)
2571 {
2572 if (worker->value == NULL)
2573 {
2574 struct value *v;
2575
2576 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2577 v->lval = lval_xcallable;
2578 v->location.xm_worker = worker;
2579 v->modifiable = 0;
2580 worker->value = v;
2581 }
2582
2583 return worker->value;
2584 }
2585
2586 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2587
2588 struct value *
2589 call_xmethod (struct value *method, int argc, struct value **argv)
2590 {
2591 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2592 && method->lval == lval_xcallable && argc > 0);
2593
2594 return invoke_xmethod (method->location.xm_worker,
2595 argv[0], argv + 1, argc - 1);
2596 }
2597 \f
2598 /* Extract a value as a C number (either long or double).
2599 Knows how to convert fixed values to double, or
2600 floating values to long.
2601 Does not deallocate the value. */
2602
2603 LONGEST
2604 value_as_long (struct value *val)
2605 {
2606 /* This coerces arrays and functions, which is necessary (e.g.
2607 in disassemble_command). It also dereferences references, which
2608 I suspect is the most logical thing to do. */
2609 val = coerce_array (val);
2610 return unpack_long (value_type (val), value_contents (val));
2611 }
2612
2613 DOUBLEST
2614 value_as_double (struct value *val)
2615 {
2616 DOUBLEST foo;
2617 int inv;
2618
2619 foo = unpack_double (value_type (val), value_contents (val), &inv);
2620 if (inv)
2621 error (_("Invalid floating value found in program."));
2622 return foo;
2623 }
2624
/* Extract a value as a C pointer. Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases. */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
     whether we want this to be true eventually. */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant. */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point. For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code. The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own. On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function. This is what
     you'll get if you evaluate an expression like `main'. The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'. This is the challenging conversion
     discussed above. Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor. The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess. If VAL is a
     function, just return its address directly. */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space. For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address. Just assume such architectures handle all
     integer conversions in a single function. */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does. When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed. Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully. In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else. The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it. */

  /* Non-pointer, non-reference values go through the architecture's
     integer-to-address hook when the target provides one.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise simply unpack the raw contents as an integer.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2731 \f
2732 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2733 as a long, or as a double, assuming the raw data is described
2734 by type TYPE. Knows how to convert different sizes of values
2735 and can convert between fixed and floating point. We don't assume
2736 any alignment for the raw data. Return value is in host byte order.
2737
2738 If you want functions and arrays to be coerced to pointers, and
2739 references to be dereferenced, call value_as_long() instead.
2740
2741 C++: It is assumed that the front-end has taken care of
2742 all matters concerning pointers to members. A pointer
2743 to member which reaches here is considered to be equivalent
2744 to an INT (or some size). After all, it is only an offset. */
2745
2746 LONGEST
2747 unpack_long (struct type *type, const gdb_byte *valaddr)
2748 {
2749 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2750 enum type_code code = TYPE_CODE (type);
2751 int len = TYPE_LENGTH (type);
2752 int nosign = TYPE_UNSIGNED (type);
2753
2754 switch (code)
2755 {
2756 case TYPE_CODE_TYPEDEF:
2757 return unpack_long (check_typedef (type), valaddr);
2758 case TYPE_CODE_ENUM:
2759 case TYPE_CODE_FLAGS:
2760 case TYPE_CODE_BOOL:
2761 case TYPE_CODE_INT:
2762 case TYPE_CODE_CHAR:
2763 case TYPE_CODE_RANGE:
2764 case TYPE_CODE_MEMBERPTR:
2765 if (nosign)
2766 return extract_unsigned_integer (valaddr, len, byte_order);
2767 else
2768 return extract_signed_integer (valaddr, len, byte_order);
2769
2770 case TYPE_CODE_FLT:
2771 return extract_typed_floating (valaddr, type);
2772
2773 case TYPE_CODE_DECFLOAT:
2774 /* libdecnumber has a function to convert from decimal to integer, but
2775 it doesn't work when the decimal number has a fractional part. */
2776 return decimal_to_doublest (valaddr, len, byte_order);
2777
2778 case TYPE_CODE_PTR:
2779 case TYPE_CODE_REF:
2780 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2781 whether we want this to be true eventually. */
2782 return extract_typed_address (valaddr, type);
2783
2784 default:
2785 error (_("Value can't be converted to integer."));
2786 }
2787 return 0; /* Placate lint. */
2788 }
2789
2790 /* Return a double value from the specified type and address.
2791 INVP points to an int which is set to 0 for valid value,
2792 1 for invalid value (bad float format). In either case,
2793 the returned double is OK to use. Argument is in target
2794 format, result is in host format. */
2795
2796 DOUBLEST
2797 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2798 {
2799 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2800 enum type_code code;
2801 int len;
2802 int nosign;
2803
2804 *invp = 0; /* Assume valid. */
2805 CHECK_TYPEDEF (type);
2806 code = TYPE_CODE (type);
2807 len = TYPE_LENGTH (type);
2808 nosign = TYPE_UNSIGNED (type);
2809 if (code == TYPE_CODE_FLT)
2810 {
2811 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2812 floating-point value was valid (using the macro
2813 INVALID_FLOAT). That test/macro have been removed.
2814
2815 It turns out that only the VAX defined this macro and then
2816 only in a non-portable way. Fixing the portability problem
2817 wouldn't help since the VAX floating-point code is also badly
2818 bit-rotten. The target needs to add definitions for the
2819 methods gdbarch_float_format and gdbarch_double_format - these
2820 exactly describe the target floating-point format. The
2821 problem here is that the corresponding floatformat_vax_f and
2822 floatformat_vax_d values these methods should be set to are
2823 also not defined either. Oops!
2824
2825 Hopefully someone will add both the missing floatformat
2826 definitions and the new cases for floatformat_is_valid (). */
2827
2828 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2829 {
2830 *invp = 1;
2831 return 0.0;
2832 }
2833
2834 return extract_typed_floating (valaddr, type);
2835 }
2836 else if (code == TYPE_CODE_DECFLOAT)
2837 return decimal_to_doublest (valaddr, len, byte_order);
2838 else if (nosign)
2839 {
2840 /* Unsigned -- be sure we compensate for signed LONGEST. */
2841 return (ULONGEST) unpack_long (type, valaddr);
2842 }
2843 else
2844 {
2845 /* Signed -- we are OK with unpack_long. */
2846 return unpack_long (type, valaddr);
2847 }
2848 }
2849
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data. Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members. A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size). After all, it is only an offset. */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
     whether we want this to be true eventually.  The LONGEST result
     of unpack_long is implicitly converted to CORE_ADDR here. */
  return unpack_long (type, valaddr);
}
2870
2871 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  Returns a lazy value located at the field's address, the
   value of the variable the field resolves to, or an optimized-out
   value if the field's symbol cannot be found at all.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the field's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
      {
	/* Only a mangled name was recorded; look the symbol up.  */
	const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
	/* TYPE_FIELD_NAME (type, fieldno); */
	struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

	if (sym == NULL)
	  {
	    /* With some compilers, e.g. HP aCC, static data members are
	       reported as non-debuggable symbols. */
	    struct bound_minimal_symbol msym
	      = lookup_minimal_symbol (phys_name, NULL, NULL);

	    if (!msym.minsym)
	      return allocate_optimized_out_value (type);
	    else
	      {
		retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
					BMSYMBOL_VALUE_ADDRESS (msym));
	      }
	  }
	else
	  retval = value_of_variable (sym, NULL);
	break;
      }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2917
2918 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2919 You have to be careful here, since the size of the data area for the value
2920 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2921 than the old enclosing type, you have to allocate more space for the
2922 data. */
2923
2924 void
2925 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2926 {
2927 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2928 val->contents =
2929 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2930
2931 val->enclosing_type = new_encl_type;
2932 }
2933
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Three cases are handled: packed
   bitfields, base-class subobjects, and plain data members.  */

struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;

  CHECK_TYPEDEF (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero. However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly. */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set. If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit. Assume that the address, offset, and embedded offset
	 are sufficiently aligned. */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer keeping bitpos within a whole aligned container read
	 when the container fits in a LONGEST; otherwise fall back to
	 byte granularity.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* Fetch now unless the parent itself is still lazy.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc. */
      int boffset;

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values. */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else
    {
      /* Plain old data member */
      offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   TYPE_LENGTH (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The result inherits ARG1's location (memory, register, frame).  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3046
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Convenience wrapper around
   value_primitive_field with a zero offset and ARG1's own type.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
3056
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  Returns NULL if neither can be
   found.  ARG1P, if non-NULL, is adjusted to the method's class type
   (the OFFSET adjustment is currently disabled, see below).  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		int offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Try a full symbol first; fall back to a minimal symbol.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
  if (sym != NULL)
    {
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      gdb_assert (sym == NULL);
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  if (sym)
    {
      /* Full symbol: the block's start is the function's address.  */
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead. */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
    }

  if (arg1p)
    {
      /* Cast `this' to the method's class type if it differs.  */
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset; */
    }

  return v;
}
3117
3118 \f
3119
/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
   VALADDR, and store the result in *RESULT.
   The bitfield starts at BITPOS bits and contains BITSIZE bits.

   Extracting bits depends on endianness of the machine. Compute the
   number of least significant bits to discard. For big endian machines,
   we compute the total number of bits in the anonymous object, subtract
   off the bit count from the MSB of the object to the MSB of the
   bitfield, then the size of the bitfield, which leaves the LSB discard
   count. For little endian machines, the discard count is simply the
   number of bits from the LSB of the anonymous object to the LSB of the
   bitfield.

   If the field is signed, we also do sign extension. */

static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  int bytes_read;
  int read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST. */
  CHECK_TYPEDEF (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    /* A zero BITSIZE means the field is not packed; read the whole
       type's length.  */
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits. See comment above. */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend. */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Test the field's sign bit (the MSB within the mask) and
	     extend it through the upper bits when set.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3185
3186 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3187 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3188 ORIGINAL_VALUE, which must not be NULL. See
3189 unpack_value_bits_as_long for more details. */
3190
3191 int
3192 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3193 int embedded_offset, int fieldno,
3194 const struct value *val, LONGEST *result)
3195 {
3196 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3197 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3198 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3199 int bit_offset;
3200
3201 gdb_assert (val != NULL);
3202
3203 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3204 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3205 || !value_bits_available (val, bit_offset, bitsize))
3206 return 0;
3207
3208 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3209 bitpos, bitsize);
3210 return 1;
3211 }
3212
3213 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3214 object at VALADDR. See unpack_bits_as_long for more details. */
3215
3216 LONGEST
3217 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3218 {
3219 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3220 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3221 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3222
3223 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3224 }
3225
3226 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3227 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3228 the contents in DEST_VAL, zero or sign extending if the type of
3229 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3230 VAL. If the VAL's contents required to extract the bitfield from
3231 are unavailable/optimized out, DEST_VAL is correspondingly
3232 marked unavailable/optimized out. */
3233
3234 static void
3235 unpack_value_bitfield (struct value *dest_val,
3236 int bitpos, int bitsize,
3237 const gdb_byte *valaddr, int embedded_offset,
3238 const struct value *val)
3239 {
3240 enum bfd_endian byte_order;
3241 int src_bit_offset;
3242 int dst_bit_offset;
3243 LONGEST num;
3244 struct type *field_type = value_type (dest_val);
3245
3246 /* First, unpack and sign extend the bitfield as if it was wholly
3247 available. Invalid/unavailable bits are read as zero, but that's
3248 OK, as they'll end up marked below. */
3249 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3250 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3251 bitpos, bitsize);
3252 store_signed_integer (value_contents_raw (dest_val),
3253 TYPE_LENGTH (field_type), byte_order, num);
3254
3255 /* Now copy the optimized out / unavailability ranges to the right
3256 bits. */
3257 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3258 if (byte_order == BFD_ENDIAN_BIG)
3259 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3260 else
3261 dst_bit_offset = 0;
3262 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3263 val, src_bit_offset, bitsize);
3264 }
3265
3266 /* Return a new value with type TYPE, which is FIELDNO field of the
3267 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3268 of VAL. If the VAL's contents required to extract the bitfield
3269 from are unavailable/optimized out, the new value is
3270 correspondingly marked unavailable/optimized out. */
3271
3272 struct value *
3273 value_field_bitfield (struct type *type, int fieldno,
3274 const gdb_byte *valaddr,
3275 int embedded_offset, const struct value *val)
3276 {
3277 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3278 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3279 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3280
3281 unpack_value_bitfield (res_val, bitpos, bitsize,
3282 valaddr, embedded_offset, val);
3283
3284 return res_val;
3285 }
3286
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.
   Emits a warning (and truncates) if FIELDVAL does not fit.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* MASK has the low BITSIZE bits set.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  int bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %d bits."), bitsize);

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then OR in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3338 \f
3339 /* Pack NUM into BUF using a target format of TYPE. */
3340
3341 void
3342 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3343 {
3344 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3345 int len;
3346
3347 type = check_typedef (type);
3348 len = TYPE_LENGTH (type);
3349
3350 switch (TYPE_CODE (type))
3351 {
3352 case TYPE_CODE_INT:
3353 case TYPE_CODE_CHAR:
3354 case TYPE_CODE_ENUM:
3355 case TYPE_CODE_FLAGS:
3356 case TYPE_CODE_BOOL:
3357 case TYPE_CODE_RANGE:
3358 case TYPE_CODE_MEMBERPTR:
3359 store_signed_integer (buf, len, byte_order, num);
3360 break;
3361
3362 case TYPE_CODE_REF:
3363 case TYPE_CODE_PTR:
3364 store_typed_address (buf, type, (CORE_ADDR) num);
3365 break;
3366
3367 default:
3368 error (_("Unexpected type (%d) encountered for integer constant."),
3369 TYPE_CODE (type));
3370 }
3371 }
3372
3373
3374 /* Pack NUM into BUF using a target format of TYPE. */
3375
3376 static void
3377 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3378 {
3379 int len;
3380 enum bfd_endian byte_order;
3381
3382 type = check_typedef (type);
3383 len = TYPE_LENGTH (type);
3384 byte_order = gdbarch_byte_order (get_type_arch (type));
3385
3386 switch (TYPE_CODE (type))
3387 {
3388 case TYPE_CODE_INT:
3389 case TYPE_CODE_CHAR:
3390 case TYPE_CODE_ENUM:
3391 case TYPE_CODE_FLAGS:
3392 case TYPE_CODE_BOOL:
3393 case TYPE_CODE_RANGE:
3394 case TYPE_CODE_MEMBERPTR:
3395 store_unsigned_integer (buf, len, byte_order, num);
3396 break;
3397
3398 case TYPE_CODE_REF:
3399 case TYPE_CODE_PTR:
3400 store_typed_address (buf, type, (CORE_ADDR) num);
3401 break;
3402
3403 default:
3404 error (_("Unexpected type (%d) encountered "
3405 "for unsigned integer constant."),
3406 TYPE_CODE (type));
3407 }
3408 }
3409
3410
3411 /* Convert C numbers into newly allocated values. */
3412
3413 struct value *
3414 value_from_longest (struct type *type, LONGEST num)
3415 {
3416 struct value *val = allocate_value (type);
3417
3418 pack_long (value_contents_raw (val), type, num);
3419 return val;
3420 }
3421
3422
3423 /* Convert C unsigned numbers into newly allocated values. */
3424
3425 struct value *
3426 value_from_ulongest (struct type *type, ULONGEST num)
3427 {
3428 struct value *val = allocate_value (type);
3429
3430 pack_unsigned_long (value_contents_raw (val), type, num);
3431
3432 return val;
3433 }
3434
3435
3436 /* Create a value representing a pointer of type TYPE to the address
3437 ADDR. */
3438
3439 struct value *
3440 value_from_pointer (struct type *type, CORE_ADDR addr)
3441 {
3442 struct value *val = allocate_value (type);
3443
3444 store_typed_address (value_contents_raw (val),
3445 check_typedef (type), addr);
3446 return val;
3447 }
3448
3449
3450 /* Create a value of type TYPE whose contents come from VALADDR, if it
3451 is non-null, and whose memory address (in the inferior) is
3452 ADDRESS. The type of the created value may differ from the passed
3453 type TYPE. Make sure to retrieve values new type after this call.
3454 Note that TYPE is not passed through resolve_dynamic_type; this is
3455 a special API intended for use only by Ada. */
3456
3457 struct value *
3458 value_from_contents_and_address_unresolved (struct type *type,
3459 const gdb_byte *valaddr,
3460 CORE_ADDR address)
3461 {
3462 struct value *v;
3463
3464 if (valaddr == NULL)
3465 v = allocate_value_lazy (type);
3466 else
3467 v = value_from_contents (type, valaddr);
3468 set_value_address (v, address);
3469 VALUE_LVAL (v) = lval_memory;
3470 return v;
3471 }
3472
3473 /* Create a value of type TYPE whose contents come from VALADDR, if it
3474 is non-null, and whose memory address (in the inferior) is
3475 ADDRESS. The type of the created value may differ from the passed
3476 type TYPE. Make sure to retrieve values new type after this call. */
3477
3478 struct value *
3479 value_from_contents_and_address (struct type *type,
3480 const gdb_byte *valaddr,
3481 CORE_ADDR address)
3482 {
3483 struct type *resolved_type = resolve_dynamic_type (type, address);
3484 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3485 struct value *v;
3486
3487 if (valaddr == NULL)
3488 v = allocate_value_lazy (resolved_type);
3489 else
3490 v = value_from_contents (resolved_type, valaddr);
3491 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3492 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3493 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3494 set_value_address (v, address);
3495 VALUE_LVAL (v) = lval_memory;
3496 return v;
3497 }
3498
3499 /* Create a value of type TYPE holding the contents CONTENTS.
3500 The new value is `not_lval'. */
3501
3502 struct value *
3503 value_from_contents (struct type *type, const gdb_byte *contents)
3504 {
3505 struct value *result;
3506
3507 result = allocate_value (type);
3508 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3509 return result;
3510 }
3511
3512 struct value *
3513 value_from_double (struct type *type, DOUBLEST num)
3514 {
3515 struct value *val = allocate_value (type);
3516 struct type *base_type = check_typedef (type);
3517 enum type_code code = TYPE_CODE (base_type);
3518
3519 if (code == TYPE_CODE_FLT)
3520 {
3521 store_typed_floating (value_contents_raw (val), base_type, num);
3522 }
3523 else
3524 error (_("Unexpected type encountered for floating constant."));
3525
3526 return val;
3527 }
3528
3529 struct value *
3530 value_from_decfloat (struct type *type, const gdb_byte *dec)
3531 {
3532 struct value *val = allocate_value (type);
3533
3534 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3535 return val;
3536 }
3537
3538 /* Extract a value from the history file. Input will be of the form
3539 $digits or $$digits. See block comment above 'write_dollar_variable'
3540 for details. */
3541
3542 struct value *
3543 value_from_history_ref (const char *h, const char **endp)
3544 {
3545 int index, len;
3546
3547 if (h[0] == '$')
3548 len = 1;
3549 else
3550 return NULL;
3551
3552 if (h[1] == '$')
3553 len = 2;
3554
3555 /* Find length of numeral string. */
3556 for (; isdigit (h[len]); len++)
3557 ;
3558
3559 /* Make sure numeral string is not part of an identifier. */
3560 if (h[len] == '_' || isalpha (h[len]))
3561 return NULL;
3562
3563 /* Now collect the index value. */
3564 if (h[1] == '$')
3565 {
3566 if (len == 2)
3567 {
3568 /* For some bizarre reason, "$$" is equivalent to "$$1",
3569 rather than to "$$0" as it ought to be! */
3570 index = -1;
3571 *endp += len;
3572 }
3573 else
3574 {
3575 char *local_end;
3576
3577 index = -strtol (&h[2], &local_end, 10);
3578 *endp = local_end;
3579 }
3580 }
3581 else
3582 {
3583 if (len == 1)
3584 {
3585 /* "$" is equivalent to "$0". */
3586 index = 0;
3587 *endp += len;
3588 }
3589 else
3590 {
3591 char *local_end;
3592
3593 index = strtol (&h[1], &local_end, 10);
3594 *endp = local_end;
3595 }
3596 }
3597
3598 return access_value_history (index);
3599 }
3600
3601 struct value *
3602 coerce_ref_if_computed (const struct value *arg)
3603 {
3604 const struct lval_funcs *funcs;
3605
3606 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3607 return NULL;
3608
3609 if (value_lval_const (arg) != lval_computed)
3610 return NULL;
3611
3612 funcs = value_computed_funcs (arg);
3613 if (funcs->coerce_ref == NULL)
3614 return NULL;
3615
3616 return funcs->coerce_ref (arg);
3617 }
3618
3619 /* Look at value.h for description. */
3620
3621 struct value *
3622 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3623 struct type *original_type,
3624 struct value *original_value)
3625 {
3626 /* Re-adjust type. */
3627 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3628
3629 /* Add embedding info. */
3630 set_value_enclosing_type (value, enc_type);
3631 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3632
3633 /* We may be pointing to an object of some derived type. */
3634 return value_full_object (value, NULL, 0, 0, 0);
3635 }
3636
3637 struct value *
3638 coerce_ref (struct value *arg)
3639 {
3640 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3641 struct value *retval;
3642 struct type *enc_type;
3643
3644 retval = coerce_ref_if_computed (arg);
3645 if (retval)
3646 return retval;
3647
3648 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3649 return arg;
3650
3651 enc_type = check_typedef (value_enclosing_type (arg));
3652 enc_type = TYPE_TARGET_TYPE (enc_type);
3653
3654 retval = value_at_lazy (enc_type,
3655 unpack_pointer (value_type (arg),
3656 value_contents (arg)));
3657 enc_type = value_type (retval);
3658 return readjust_indirect_value_type (retval, enc_type,
3659 value_type_arg_tmp, arg);
3660 }
3661
3662 struct value *
3663 coerce_array (struct value *arg)
3664 {
3665 struct type *type;
3666
3667 arg = coerce_ref (arg);
3668 type = check_typedef (value_type (arg));
3669
3670 switch (TYPE_CODE (type))
3671 {
3672 case TYPE_CODE_ARRAY:
3673 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3674 arg = value_coerce_array (arg);
3675 break;
3676 case TYPE_CODE_FUNC:
3677 arg = value_coerce_function (arg);
3678 break;
3679 }
3680 return arg;
3681 }
3682 \f
3683
3684 /* Return the return value convention that will be used for the
3685 specified type. */
3686
3687 enum return_value_convention
3688 struct_return_convention (struct gdbarch *gdbarch,
3689 struct value *function, struct type *value_type)
3690 {
3691 enum type_code code = TYPE_CODE (value_type);
3692
3693 if (code == TYPE_CODE_ERROR)
3694 error (_("Function return type unknown."));
3695
3696 /* Probe the architecture for the return-value convention. */
3697 return gdbarch_return_value (gdbarch, function, value_type,
3698 NULL, NULL, NULL);
3699 }
3700
3701 /* Return true if the function returning the specified type is using
3702 the convention of returning structures in memory (passing in the
3703 address as a hidden first parameter). */
3704
3705 int
3706 using_struct_return (struct gdbarch *gdbarch,
3707 struct value *function, struct type *value_type)
3708 {
3709 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3710 /* A void return value is never in memory. See also corresponding
3711 code in "print_return_value". */
3712 return 0;
3713
3714 return (struct_return_convention (gdbarch, function, value_type)
3715 != RETURN_VALUE_REGISTER_CONVENTION);
3716 }
3717
/* Set the initialized field in a value struct to STATUS (nonzero
   means initialized).  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3725
/* Return the initialized field in a value struct (nonzero means
   initialized).  */

int
value_initialized (struct value *val)
{
  return val->initialized;
}
3733
/* Called only from the value_contents and value_contents_all()
   macros, if the current data for a variable needs to be loaded into
   value_contents(VAL).  Fetches the data from the user's process, and
   clears the lazy flag to indicate that the data in the buffer is
   valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.

   This function returns a value because it is used in the
   value_contents macro as part of an expression, where a void would
   not work.  The value is ignored.  */

int
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (VEC_empty (range_s, val->optimized_out));
  gdb_assert (VEC_empty (range_s, val->unavailable));
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
	 per bitfield.  It would be even better to read only the containing
	 word, but we have no way to record that just specific bits of a
	 value have been fetched.  */
      struct type *type = check_typedef (value_type (val));	/* NOTE(review): unused in this branch.  */
      struct value *parent = value_parent (val);

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      unpack_value_bitfield (val,
			     value_bitpos (val), value_bitsize (val),
			     value_contents_for_printing (parent),
			     value_offset (val), parent);
    }
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      /* Zero-length types have nothing to read; see the function
	 comment above.  */
      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   TYPE_LENGTH (type));
    }
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      /* VAL itself is lazy and lval_register, so this loop runs at
	 least once and FRAME/REGNUM are always set.  */
      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);

	  frame = frame_find_by_id (frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
						   regnum, type));

	  new_val = get_frame_register_value (frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from the next frame.
	     get_frame_register_value should never return a value with
	     the frame id pointing to FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* Copy the contents and the unavailability/optimized-out
	 meta-data from NEW_VAL to VAL.  */
      set_value_lazy (val, 0);
      value_contents_copy (val, value_embedded_offset (val),
			   new_val, value_embedded_offset (new_val),
			   TYPE_LENGTH (type));

      /* With "set debug frame on", trace what was fetched and from
	 where.  */
      if (frame_debug)
	{
	  struct gdbarch *gdbarch;
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
  return 0;
}
3902
3903 /* Implementation of the convenience function $_isvoid. */
3904
3905 static struct value *
3906 isvoid_internal_fn (struct gdbarch *gdbarch,
3907 const struct language_defn *language,
3908 void *cookie, int argc, struct value **argv)
3909 {
3910 int ret;
3911
3912 if (argc != 1)
3913 error (_("You must provide one argument for $_isvoid."));
3914
3915 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3916
3917 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3918 }
3919
3920 void
3921 _initialize_values (void)
3922 {
3923 add_cmd ("convenience", no_class, show_convenience, _("\
3924 Debugger convenience (\"$foo\") variables and functions.\n\
3925 Convenience variables are created when you assign them values;\n\
3926 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3927 \n\
3928 A few convenience variables are given values automatically:\n\
3929 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3930 \"$__\" holds the contents of the last address examined with \"x\"."
3931 #ifdef HAVE_PYTHON
3932 "\n\n\
3933 Convenience functions are defined via the Python API."
3934 #endif
3935 ), &showlist);
3936 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3937
3938 add_cmd ("values", no_set_class, show_values, _("\
3939 Elements of value history around item number IDX (or last ten)."),
3940 &showlist);
3941
3942 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3943 Initialize a convenience variable if necessary.\n\
3944 init-if-undefined VARIABLE = EXPRESSION\n\
3945 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3946 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3947 VARIABLE is already initialized."));
3948
3949 add_prefix_cmd ("function", no_class, function_command, _("\
3950 Placeholder command for showing help on convenience functions."),
3951 &functionlist, "function ", 0, &cmdlist);
3952
3953 add_internal_function ("_isvoid", _("\
3954 Check whether an expression is void.\n\
3955 Usage: $_isvoid (expression)\n\
3956 Return 1 if the expression is void, zero otherwise."),
3957 isvoid_internal_fn, NULL);
3958 }
This page took 0.15333 seconds and 4 git commands to generate.