cec1d4a9f0994fbcec96aef11579d37b94196194
[deliverable/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "exceptions.h"
39 #include "extension.h"
40 #include <ctype.h>
41 #include "tracepoint.h"
42 #include "cp-abi.h"
43 #include "user-regs.h"
44
45 /* Prototypes for exported functions. */
46
47 void _initialize_values (void);
48
49 /* Definition of a user function. */
/* Definition of a user function (a "convenience function" callable
   from expressions, registered via add_internal_function).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler invoked when the function is called from an
     expression.  */
  internal_function_fn handler;

  /* User data passed back to HANDLER on each call.  */
  void *cookie;
};
63
/* Defines an [OFFSET, OFFSET + LENGTH) range.  Used to track which
   parts of a value's contents are unavailable.  Units (bits vs
   bytes) are determined by the user of the range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

DEF_VEC_O(range_s);
78
79 /* Returns true if the ranges defined by [offset1, offset1+len1) and
80 [offset2, offset2+len2) overlap. */
81
82 static int
83 ranges_overlap (int offset1, int len1,
84 int offset2, int len2)
85 {
86 ULONGEST h, l;
87
88 l = max (offset1, offset2);
89 h = min (offset1 + len1, offset2 + len2);
90 return (l < h);
91 }
92
93 /* Returns true if the first argument is strictly less than the
94 second, useful for VEC_lower_bound. We keep ranges sorted by
95 offset and coalesce overlapping and contiguous ranges, so this just
96 compares the starting offset. */
97
98 static int
99 range_lessthan (const range_s *r1, const range_s *r2)
100 {
101 return r1->offset < r2->offset;
102 }
103
/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static int
ranges_contain (VEC(range_s) *ranges, int offset, int length)
{
  range_s what;
  int i;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

         R
         |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R, so we must report a hit.

     Then we need to check if the range at position I itself overlaps
     the given range.  E.g.,

              R
              |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */

  i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);

  /* Check the range just before the insertion point, if any.  */
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, ranges, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	return 1;
    }

  /* Check the range at the insertion point, if any.  */
  if (i < VEC_length (range_s, ranges))
    {
      struct range *r = VEC_index (range_s, ranges, i);

      if (ranges_overlap (r->offset, r->length, offset, length))
	return 1;
    }

  return 0;
}
167
/* Command list presumably used to register internal (convenience)
   functions with the CLI; the code that populates it is not visible
   in this chunk -- TODO confirm against _initialize_values.  */
static struct cmd_list_element *functionlist;
169
/* The core GDB value object.  Note that the fields in this structure
   are arranged to save a bit of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If nonzero, this is the value of a variable that does not
     actually exist in the program.  If nonzero, and LVAL is
     lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  All
     optimized-out values are treated pretty much the same, except
     registers have a different string representation and related
     error strings.  */
  unsigned int optimized_out : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released from the all_values chain (see
     release_value).  */
  unsigned int released : 1;

  /* Register number if the value is from a register.  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in bytes.
     If lval == lval_memory, this is an offset to the address.  If
     lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the
     member embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in bytes.  The
     value_contents() macro takes `embedded_offset' into account, so
     most GDB code continues to see the `type' portion of the value,
     just as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in bytes from the full object to the pointed-to object
     -- that is, the value `embedded_offset' would have if we followed
     the pointer and fetched the complete object.  (I don't really see
     the point.  Why not just determine the run-time type when you
     indirect, and avoid the special case?  The contents don't matter
     until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.  The
     unavailable ranges are tracked in bits.  */
  VEC(range_s) *unavailable;
};
341
342 int
343 value_bits_available (const struct value *value, int offset, int length)
344 {
345 gdb_assert (!value->lazy);
346
347 return !ranges_contain (value->unavailable, offset, length);
348 }
349
350 int
351 value_bytes_available (const struct value *value, int offset, int length)
352 {
353 return value_bits_available (value,
354 offset * TARGET_CHAR_BIT,
355 length * TARGET_CHAR_BIT);
356 }
357
358 int
359 value_entirely_available (struct value *value)
360 {
361 /* We can only tell whether the whole value is available when we try
362 to read it. */
363 if (value->lazy)
364 value_fetch_lazy (value);
365
366 if (VEC_empty (range_s, value->unavailable))
367 return 1;
368 return 0;
369 }
370
371 int
372 value_entirely_unavailable (struct value *value)
373 {
374 /* We can only tell whether the whole value is available when we try
375 to read it. */
376 if (value->lazy)
377 value_fetch_lazy (value);
378
379 if (VEC_length (range_s, value->unavailable) == 1)
380 {
381 struct range *t = VEC_index (range_s, value->unavailable, 0);
382
383 if (t->offset == 0
384 && t->length == (TARGET_CHAR_BIT
385 * TYPE_LENGTH (value_enclosing_type (value))))
386 return 1;
387 }
388
389 return 0;
390 }
391
/* Record the bit range [OFFSET, OFFSET+LENGTH) of VALUE's contents
   as unavailable, keeping the unavailable VEC sorted and coalesced.  */

void
mark_value_bits_unavailable (struct value *value, int offset, int length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	 0         1      2            N

	 I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	  |--|         |---|  |------| ... |--|
	  0            1      2            N

	 I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	  R
	 |-...-|
	         |--|       |---|  |------| ... |--|
	         0          1      2            N

	 I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	  R
	 |------------------------|
	         |--|       |---|  |------| ... |--|
	         0          1      2            N

	 I=0

       or:

	    R
	    |-------|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

  */

  i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, value->unavailable, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	{
	  /* #1: merge the new range into the previous one, and point
	     I at the merged range for the #5 pass below.  */
	  ULONGEST l = min (bef->offset, offset);
	  ULONGEST h = max (bef->offset + bef->length, offset + length);

	  bef->offset = l;
	  bef->length = h - l;
	  i--;
	}
      else if (offset == bef->offset + bef->length)
	{
	  /* #2: the new range starts exactly where the previous one
	     ends -- extend the previous range.  */
	  bef->length += length;
	  i--;
	}
      else
	{
	  /* #3: disjoint from the previous range, insert as-is.  */
	  VEC_safe_insert (range_s, value->unavailable, i, &newr);
	}
    }
  else
    {
      /* #4: new lowest-offset range, insert at the front.  */
      VEC_safe_insert (range_s, value->unavailable, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, value->unavailable))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, value->unavailable, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
	if (r->offset <= t->offset + t->length)
	  {
	    /* R overlaps or is contiguous with T; fold it into T and
	       remember to delete it afterwards.  */
	    ULONGEST l, h;

	    l = min (t->offset, r->offset);
	    h = max (t->offset + t->length, r->offset + r->length);

	    t->offset = l;
	    t->length = h - l;

	    removed++;
	  }
	else
	  {
	    /* If we couldn't merge this one, we won't be able to
	       merge following ones either, since the ranges are
	       always sorted by OFFSET.  */
	    break;
	  }

      if (removed != 0)
	VEC_block_remove (range_s, value->unavailable, next, removed);
    }
}
556
557 void
558 mark_value_bytes_unavailable (struct value *value, int offset, int length)
559 {
560 mark_value_bits_unavailable (value,
561 offset * TARGET_CHAR_BIT,
562 length * TARGET_CHAR_BIT);
563 }
564
565 /* Find the first range in RANGES that overlaps the range defined by
566 OFFSET and LENGTH, starting at element POS in the RANGES vector,
567 Returns the index into RANGES where such overlapping range was
568 found, or -1 if none was found. */
569
570 static int
571 find_first_range_overlap (VEC(range_s) *ranges, int pos,
572 int offset, int length)
573 {
574 range_s *r;
575 int i;
576
577 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
578 if (ranges_overlap (r->offset, r->length, offset, length))
579 return i;
580
581 return -1;
582 }
583
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* Keep the low BITS bits of the byte -- the tail of the partial
	 byte, given MSB-first bit numbering within a byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this partial byte; drop
	     the trailing bits beyond LENGTH_BITS from the mask.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* Keep the high BITS bits of the final byte -- the head of the
	 trailing partial byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
684
/* Helper function for value_available_contents_eq.  The only difference is
   that this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits with
   LENGTH bits of VAL2's contents starting at OFFSET2 bits.  Return true
   if the available bits match.  */

static int
value_available_contents_bits_eq (const struct value *val1, int offset1,
				  const struct value *val2, int offset2,
				  int length)
{
  int idx1 = 0, idx2 = 0;

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* Walk both values in lock step, alternating between runs of
     available bits (compared with memcmp_with_bit_offsets) and
     unavailable windows (which must line up exactly).  */
  while (length > 0)
    {
      range_s *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      idx1 = find_first_range_overlap (val1->unavailable, idx1,
				       offset1, length);
      idx2 = find_first_range_overlap (val2->unavailable, idx2,
				       offset2, length);

      /* The usual case is for both values to be completely available.  */
      if (idx1 == -1 && idx2 == -1)
	return (memcmp_with_bit_offsets (val1->contents, offset1,
					 val2->contents, offset2,
					 length) == 0);
      /* The contents only match equal if the available set matches as
	 well.  */
      else if (idx1 == -1 || idx2 == -1)
	return 0;

      gdb_assert (idx1 != -1 && idx2 != -1);

      r1 = VEC_index (range_s, val1->unavailable, idx1);
      r2 = VEC_index (range_s, val2->unavailable, idx2);

      /* Get the unavailable windows intersected by the incoming
	 ranges.  The first and last ranges that overlap the argument
	 range may be wider than said incoming arguments ranges.  */
      l1 = max (offset1, r1->offset);
      h1 = min (offset1 + length, r1->offset + r1->length);

      l2 = max (offset2, r2->offset);
      h2 = min (offset2 + length, r2->offset + r2->length);

      /* Make them relative to the respective start offsets, so we can
	 compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different availability, no match.  */
      if (l1 != l2 || h1 != h2)
	return 0;

      /* Compare the _available_ contents (the L1 bits preceding the
	 unavailable window; the window itself trivially "matches").  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l1) != 0)
	return 0;

      /* Skip past the unavailable window and continue.  */
      length -= h1;
      offset1 += h1;
      offset2 += h1;
    }

  return 1;
}
761
762 int
763 value_available_contents_eq (const struct value *val1, int offset1,
764 const struct value *val2, int offset2,
765 int length)
766 {
767 return value_available_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
768 val2, offset2 * TARGET_CHAR_BIT,
769 length * TARGET_CHAR_BIT);
770 }
771
772 /* Prototypes for local functions. */
773
774 static void show_values (char *, int);
775
776 static void show_convenience (char *, int);
777
778
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
  {
    /* Next (older) chunk in the chain.  */
    struct value_history_chunk *next;

    /* The recorded values; slots are filled in order.  */
    struct value *values[VALUE_HISTORY_CHUNK];
  };

/* Chain of chunks now in use.  */

static struct value_history_chunk *value_history_chain;

static int value_history_count;	/* Abs number of last entry stored.  */

/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static struct value *all_values;
805
/* Allocate a lazy value for type TYPE.  Its actual content is
   "lazily" allocated too: the content field of the return value is
   NULL; it will be allocated when it is fetched from the target.  */

struct value *
allocate_value_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  /* xzalloc zero-fills, so every field not assigned below starts
     out 0/NULL.  */
  val = (struct value *) xzalloc (sizeof (struct value));
  val->contents = NULL;
  val->next = all_values;
  all_values = val;
  val->type = type;
  val->enclosing_type = type;
  VALUE_LVAL (val) = not_lval;
  val->location.address = 0;
  VALUE_FRAME_ID (val) = null_frame_id;
  val->offset = 0;
  val->bitpos = 0;
  val->bitsize = 0;
  VALUE_REGNUM (val) = -1;
  val->lazy = 1;
  val->optimized_out = 0;
  val->embedded_offset = 0;
  val->pointed_to_offset = 0;
  val->modifiable = 1;
  val->initialized = 1;  /* Default to initialized.  */

  /* Values start out on the all_values chain.  */
  val->reference_count = 1;

  return val;
}
848
849 /* Allocate the contents of VAL if it has not been allocated yet. */
850
851 static void
852 allocate_value_contents (struct value *val)
853 {
854 if (!val->contents)
855 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
856 }
857
858 /* Allocate a value and its contents for type TYPE. */
859
860 struct value *
861 allocate_value (struct type *type)
862 {
863 struct value *val = allocate_value_lazy (type);
864
865 allocate_value_contents (val);
866 val->lazy = 0;
867 return val;
868 }
869
/* Allocate a value that has the correct length
   for COUNT repetitions of type TYPE, i.e. an array value of
   COUNT elements of TYPE.  */

struct value *
allocate_repeat_value (struct type *type, int count)
{
  /* Use the current language's array lower bound; whether that is
     always right here is unclear.  */
  int low_bound = current_language->string_lower_bound;		/* ??? */
  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  struct type *array_type
    = lookup_array_range_type (type, low_bound, count + low_bound - 1);

  return allocate_value (array_type);
}
884
885 struct value *
886 allocate_computed_value (struct type *type,
887 const struct lval_funcs *funcs,
888 void *closure)
889 {
890 struct value *v = allocate_value_lazy (type);
891
892 VALUE_LVAL (v) = lval_computed;
893 v->location.computed.funcs = funcs;
894 v->location.computed.closure = closure;
895
896 return v;
897 }
898
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  set_value_optimized_out (retval, 1);
  /* FIXME: we should be able to avoid allocating the value's contents
     buffer, but value_available_contents_bits_eq can't handle
     that.  */
  /* set_value_lazy (retval, 0); */
  return retval;
}
913
/* Accessor methods.  */

/* Return the next value on VALUE's all_values chain.  */

struct value *
value_next (struct value *value)
{
  return value->next;
}

/* Return VALUE's (compile-time) type.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}

/* Overwrite VALUE's type.  Deprecated: callers should normally create
   a new value instead of mutating an existing one's type.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Return VALUE's byte offset within its lval (see struct value).  */

int
value_offset (const struct value *value)
{
  return value->offset;
}

/* Set VALUE's byte offset within its lval.  */
void
set_value_offset (struct value *value, int offset)
{
  value->offset = offset;
}

/* Return the bit position of VALUE if it is a bitfield.  */

int
value_bitpos (const struct value *value)
{
  return value->bitpos;
}

/* Set the bit position of VALUE (bitfields only).  */
void
set_value_bitpos (struct value *value, int bit)
{
  value->bitpos = bit;
}

/* Return the bit size of VALUE if it is a bitfield, else 0.  */

int
value_bitsize (const struct value *value)
{
  return value->bitsize;
}

/* Set the bit size of VALUE (bitfields only).  */
void
set_value_bitsize (struct value *value, int bit)
{
  value->bitsize = bit;
}

/* Return the containing value of VALUE (bitfields only).  */

struct value *
value_parent (struct value *value)
{
  return value->parent;
}
971
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Take the new reference before dropping the old one, so this is
     safe even if PARENT == OLD.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
984
/* Return a mutable pointer to VALUE's contents for its compile-time
   `type' portion (i.e. past the embedded offset), allocating the
   buffer if needed.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents + value->embedded_offset;
}

/* Return a mutable pointer to the whole of VALUE's contents (the
   enclosing type), allocating the buffer if needed.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

/* Return VALUE's enclosing (run-time) type.  */

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1004
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default to the declared (compile-time) type.  */
  result = value_type (value);
  /* Only attempt dynamic type resolution when the user has "set print
     object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-pointer/reference types, the enclosing type is
	     the best dynamic type we have.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1048
/* Report the generic "value optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Error out if VALUE is marked optimized out, with a register-specific
   message for unsaved registers.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (value->optimized_out)
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE's contents is
   unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1073
/* Return a read-only pointer to the whole of VALUE's contents,
   fetching them first if VALUE is lazy.  Does NOT error out on
   optimized-out or unavailable contents -- suitable for printing
   code that handles those cases itself.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant of the above; VALUE must already be non-lazy since we
   cannot fetch through a const pointer.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Return a read-only pointer to the whole of VALUE's contents,
   erroring out if any part is optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);

  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1097
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, int dst_offset,
			 struct value *src, int src_offset, int length)
{
  range_s *r;
  int i;
  int src_bit_offset, dst_bit_offset, bit_length;

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset,
	  value_contents_all_raw (src) + src_offset,
	  length);

  /* Copy the meta-data, adjusted.  Unavailable ranges are tracked in
     bits, so convert the byte arguments first.  */
  src_bit_offset = src_offset * TARGET_CHAR_BIT;
  dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
  bit_length = length * TARGET_CHAR_BIT;
  for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++)
    {
      ULONGEST h, l;

      /* Intersect this unavailable range with the copied window.  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      if (l < h)
	mark_value_bits_unavailable (dst,
				     dst_bit_offset + (l - src_bit_offset),
				     h - l);
    }
}
1149
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.  If SRC is not valid (is optimized
   out), an error is thrown.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* NOTE(review): the optimized-out check precedes the lazy fetch, so
     optimized-out state that would only be discovered while fetching
     a lazy SRC is not caught here -- confirm callers are OK with that
     ordering.  */
  require_not_optimized_out (src);

  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1172
1173 int
1174 value_lazy (struct value *value)
1175 {
1176 return value->lazy;
1177 }
1178
1179 void
1180 set_value_lazy (struct value *value, int val)
1181 {
1182 value->lazy = val;
1183 }
1184
1185 int
1186 value_stack (struct value *value)
1187 {
1188 return value->stack;
1189 }
1190
1191 void
1192 set_value_stack (struct value *value, int val)
1193 {
1194 value->stack = val;
1195 }
1196
1197 const gdb_byte *
1198 value_contents (struct value *value)
1199 {
1200 const gdb_byte *result = value_contents_writeable (value);
1201 require_not_optimized_out (value);
1202 require_available (value);
1203 return result;
1204 }
1205
1206 gdb_byte *
1207 value_contents_writeable (struct value *value)
1208 {
1209 if (value->lazy)
1210 value_fetch_lazy (value);
1211 return value_contents_raw (value);
1212 }
1213
1214 int
1215 value_optimized_out (struct value *value)
1216 {
1217 /* We can only know if a value is optimized out once we have tried to
1218 fetch it. */
1219 if (!value->optimized_out && value->lazy)
1220 value_fetch_lazy (value);
1221
1222 return value->optimized_out;
1223 }
1224
1225 int
1226 value_optimized_out_const (const struct value *value)
1227 {
1228 return value->optimized_out;
1229 }
1230
1231 void
1232 set_value_optimized_out (struct value *value, int val)
1233 {
1234 value->optimized_out = val;
1235 }
1236
1237 int
1238 value_entirely_optimized_out (const struct value *value)
1239 {
1240 if (!value->optimized_out)
1241 return 0;
1242 if (value->lval != lval_computed
1243 || !value->location.computed.funcs->check_any_valid)
1244 return 1;
1245 return !value->location.computed.funcs->check_any_valid (value);
1246 }
1247
1248 int
1249 value_bits_valid (const struct value *value, int offset, int length)
1250 {
1251 if (!value->optimized_out)
1252 return 1;
1253 if (value->lval != lval_computed
1254 || !value->location.computed.funcs->check_validity)
1255 return 0;
1256 return value->location.computed.funcs->check_validity (value, offset,
1257 length);
1258 }
1259
1260 int
1261 value_bits_synthetic_pointer (const struct value *value,
1262 int offset, int length)
1263 {
1264 if (value->lval != lval_computed
1265 || !value->location.computed.funcs->check_synthetic_pointer)
1266 return 0;
1267 return value->location.computed.funcs->check_synthetic_pointer (value,
1268 offset,
1269 length);
1270 }
1271
1272 int
1273 value_embedded_offset (struct value *value)
1274 {
1275 return value->embedded_offset;
1276 }
1277
1278 void
1279 set_value_embedded_offset (struct value *value, int val)
1280 {
1281 value->embedded_offset = val;
1282 }
1283
1284 int
1285 value_pointed_to_offset (struct value *value)
1286 {
1287 return value->pointed_to_offset;
1288 }
1289
1290 void
1291 set_value_pointed_to_offset (struct value *value, int val)
1292 {
1293 value->pointed_to_offset = val;
1294 }
1295
1296 const struct lval_funcs *
1297 value_computed_funcs (const struct value *v)
1298 {
1299 gdb_assert (value_lval_const (v) == lval_computed);
1300
1301 return v->location.computed.funcs;
1302 }
1303
1304 void *
1305 value_computed_closure (const struct value *v)
1306 {
1307 gdb_assert (v->lval == lval_computed);
1308
1309 return v->location.computed.closure;
1310 }
1311
1312 enum lval_type *
1313 deprecated_value_lval_hack (struct value *value)
1314 {
1315 return &value->lval;
1316 }
1317
1318 enum lval_type
1319 value_lval_const (const struct value *value)
1320 {
1321 return value->lval;
1322 }
1323
1324 CORE_ADDR
1325 value_address (const struct value *value)
1326 {
1327 if (value->lval == lval_internalvar
1328 || value->lval == lval_internalvar_component
1329 || value->lval == lval_xcallable)
1330 return 0;
1331 if (value->parent != NULL)
1332 return value_address (value->parent) + value->offset;
1333 else
1334 return value->location.address + value->offset;
1335 }
1336
1337 CORE_ADDR
1338 value_raw_address (struct value *value)
1339 {
1340 if (value->lval == lval_internalvar
1341 || value->lval == lval_internalvar_component
1342 || value->lval == lval_xcallable)
1343 return 0;
1344 return value->location.address;
1345 }
1346
/* Store ADDR as VALUE's address.  Values with no address of their own
   (internal variables and xmethod values) must never reach here.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}
1355
1356 struct internalvar **
1357 deprecated_value_internalvar_hack (struct value *value)
1358 {
1359 return &value->location.internalvar;
1360 }
1361
1362 struct frame_id *
1363 deprecated_value_frame_id_hack (struct value *value)
1364 {
1365 return &value->frame_id;
1366 }
1367
1368 short *
1369 deprecated_value_regnum_hack (struct value *value)
1370 {
1371 return &value->regnum;
1372 }
1373
1374 int
1375 deprecated_value_modifiable (struct value *value)
1376 {
1377 return value->modifiable;
1378 }
1379 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  The mark is
   simply the current head of the all_values chain.  */
struct value *
value_mark (void)
{
  return all_values;
}
1388
/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  The matching decrement happens in
   value_free.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1397
/* Release a reference to VAL, which was acquired with value_incref.
   This function is also called to deallocate values from the value
   chain.  */

void
value_free (struct value *val)
{
  if (val)
    {
      gdb_assert (val->reference_count > 0);
      val->reference_count--;
      /* Somebody else still holds a reference; keep the value
	 alive.  */
      if (val->reference_count > 0)
	return;

      /* If there's an associated parent value, drop our reference to
	 it.  */
      if (val->parent != NULL)
	value_free (val->parent);

      /* Computed lvalues may own an opaque closure; let the provider
	 release it.  */
      if (VALUE_LVAL (val) == lval_computed)
	{
	  const struct lval_funcs *funcs = val->location.computed.funcs;

	  if (funcs->free_closure)
	    funcs->free_closure (val);
	}
      else if (VALUE_LVAL (val) == lval_xcallable)
	free_xmethod_worker (val->location.xm_worker);

      xfree (val->contents);
      VEC_free (range_s, val->unavailable);
    }
  /* xfree (NULL) is a no-op, so a NULL VAL falls through safely.  */
  xfree (val);
}
1432
1433 /* Free all values allocated since MARK was obtained by value_mark
1434 (except for those released). */
1435 void
1436 value_free_to_mark (struct value *mark)
1437 {
1438 struct value *val;
1439 struct value *next;
1440
1441 for (val = all_values; val && val != mark; val = next)
1442 {
1443 next = val->next;
1444 val->released = 1;
1445 value_free (val);
1446 }
1447 all_values = val;
1448 }
1449
1450 /* Free all the values that have been allocated (except for those released).
1451 Call after each command, successful or not.
1452 In practice this is called before each command, which is sufficient. */
1453
1454 void
1455 free_all_values (void)
1456 {
1457 struct value *val;
1458 struct value *next;
1459
1460 for (val = all_values; val; val = next)
1461 {
1462 next = val->next;
1463 val->released = 1;
1464 value_free (val);
1465 }
1466
1467 all_values = 0;
1468 }
1469
1470 /* Frees all the elements in a chain of values. */
1471
1472 void
1473 free_value_chain (struct value *v)
1474 {
1475 struct value *next;
1476
1477 for (; v; v = next)
1478 {
1479 next = value_next (v);
1480 value_free (v);
1481 }
1482 }
1483
1484 /* Remove VAL from the chain all_values
1485 so it will not be freed automatically. */
1486
1487 void
1488 release_value (struct value *val)
1489 {
1490 struct value *v;
1491
1492 if (all_values == val)
1493 {
1494 all_values = val->next;
1495 val->next = NULL;
1496 val->released = 1;
1497 return;
1498 }
1499
1500 for (v = all_values; v; v = v->next)
1501 {
1502 if (v->next == val)
1503 {
1504 v->next = val->next;
1505 val->next = NULL;
1506 val->released = 1;
1507 break;
1508 }
1509 }
1510 }
1511
1512 /* If the value is not already released, release it.
1513 If the value is already released, increment its reference count.
1514 That is, this function ensures that the value is released from the
1515 value chain and that the caller owns a reference to it. */
1516
1517 void
1518 release_value_or_incref (struct value *val)
1519 {
1520 if (val->released)
1521 value_incref (val);
1522 else
1523 release_value (val);
1524 }
1525
/* Release all values up to mark.  Returns the head of the released
   sublist (the old all_values head); the sublist is NULL-terminated
   just before MARK.  */
struct value *
value_release_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* VAL remembers the head of the released sublist; NEXT walks the
     chain flagging each value as released until the element linking
     to MARK is found.  */
  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  all_values = next->next;
	  next->next = NULL;
	  /* NOTE(review): NEXT itself is returned in the released
	     sublist but its `released' flag is never set on this
	     path -- confirm whether that asymmetry is intended.  */
	  return val;
	}
      next->released = 1;
    }
  /* MARK was not found on the chain (or the chain was empty);
     everything was released.  */
  all_values = 0;
  return val;
}
1546
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* A lazy source yields a lazy copy; no contents buffer is allocated
     until the copy is fetched.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->optimized_out = arg->optimized_out;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  set_value_parent (val, arg->parent);
  /* Computed lvalues carry an opaque closure; ask the provider to
     duplicate it for the copy.  */
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1591
1592 /* Return a version of ARG that is non-lvalue. */
1593
1594 struct value *
1595 value_non_lval (struct value *arg)
1596 {
1597 if (VALUE_LVAL (arg) != not_lval)
1598 {
1599 struct type *enc_type = value_enclosing_type (arg);
1600 struct value *val = allocate_value (enc_type);
1601
1602 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1603 TYPE_LENGTH (enc_type));
1604 val->type = arg->type;
1605 set_value_embedded_offset (val, value_embedded_offset (arg));
1606 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1607 return val;
1608 }
1609 return arg;
1610 }
1611
1612 void
1613 set_value_component_location (struct value *component,
1614 const struct value *whole)
1615 {
1616 gdb_assert (whole->lval != lval_xcallable);
1617
1618 if (whole->lval == lval_internalvar)
1619 VALUE_LVAL (component) = lval_internalvar_component;
1620 else
1621 VALUE_LVAL (component) = whole->lval;
1622
1623 component->location = whole->location;
1624 if (whole->lval == lval_computed)
1625 {
1626 const struct lval_funcs *funcs = whole->location.computed.funcs;
1627
1628 if (funcs->copy_closure)
1629 component->location.computed.closure = funcs->copy_closure (whole);
1630 }
1631 }
1632
1633 \f
1634 /* Access to the value history. */
1635
/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* The newest chunk is full (or no chunk exists yet): push a
	 fresh zero-filled chunk at the head of the list.  */
      struct value_history_chunk *new
	= (struct value_history_chunk *)

	xmalloc (sizeof (struct value_history_chunk));
      memset (new->values, 0, sizeof new->values);
      new->next = value_history_chain;
      value_history_chain = new;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1682
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 is interpreted relative to the most recent entry
   ($$0, $$1, ...).  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* The chunk list is newest-first: walk back over as many links as
     there are whole chunks between the newest entry and ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
       - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1719
/* Implement the "show values" command: print up to ten history values
   starting around a requested index, remembering where a subsequent
   "show values +" should continue.  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Persists across invocations so "show values +" continues where
     the previous listing stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
         "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1766 \f
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Link on the chain headed by the `internalvars' global.  */
  struct internalvar *next;
  /* The variable's name, without the user-visible '$' prefix.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind
    {
      /* The internal variable is empty.  */
      INTERNALVAR_VOID,

      /* The value of the internal variable is provided directly as
	 a GDB value object.  */
      INTERNALVAR_VALUE,

      /* A fresh value is computed via a call-back routine on every
	 access to the internal variable.  */
      INTERNALVAR_MAKE_VALUE,

      /* The internal variable holds a GDB internal convenience function.  */
      INTERNALVAR_FUNCTION,

      /* The variable holds an integer value.  */
      INTERNALVAR_INTEGER,

      /* The variable holds a GDB-provided string.  */
      INTERNALVAR_STRING,

    } kind;

  /* Only the member selected by KIND is meaningful.  */
  union internalvar_data
    {
      /* A value object used with INTERNALVAR_VALUE.  */
      struct value *value;

      /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
      struct
        {
	  /* The functions to call.  */
	  const struct internalvar_funcs *functions;

	  /* The function's user-data.  */
	  void *data;
        } make_value;

      /* The internal function used with INTERNALVAR_FUNCTION.  */
      struct
	{
	  struct internal_function *function;
	  /* True if this is the canonical name for the function.  */
	  int canonical;
	} fn;

      /* An integer value used with INTERNALVAR_INTEGER.  */
      struct
        {
	  /* If type is non-NULL, it will be used as the type to generate
	     a value for this internal variable.  If type is NULL, a default
	     integer type for the architecture is used.  */
	  struct type *type;
	  LONGEST val;
        } integer;

      /* A string value used with INTERNALVAR_STRING.  */
      char *string;
    } u;
};
1842
/* Head of the chain of all defined internal variables; new variables
   are pushed at the front (see create_internalvar).  */
static struct internalvar *internalvars;
1844
1845 /* If the variable does not already exist create it and give it the
1846 value given. If no value is given then the default is zero. */
1847 static void
1848 init_if_undefined_command (char* args, int from_tty)
1849 {
1850 struct internalvar* intvar;
1851
1852 /* Parse the expression - this is taken from set_command(). */
1853 struct expression *expr = parse_expression (args);
1854 register struct cleanup *old_chain =
1855 make_cleanup (free_current_contents, &expr);
1856
1857 /* Validate the expression.
1858 Was the expression an assignment?
1859 Or even an expression at all? */
1860 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1861 error (_("Init-if-undefined requires an assignment expression."));
1862
1863 /* Extract the variable from the parsed expression.
1864 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1865 if (expr->elts[1].opcode != OP_INTERNALVAR)
1866 error (_("The first parameter to init-if-undefined "
1867 "should be a GDB variable."));
1868 intvar = expr->elts[2].internalvar;
1869
1870 /* Only evaluate the expression if the lvalue is void.
1871 This may still fail if the expresssion is invalid. */
1872 if (intvar->kind == INTERNALVAR_VOID)
1873 evaluate_expression (expr);
1874
1875 do_cleanups (old_chain);
1876 }
1877
1878
1879 /* Look up an internal variable with name NAME. NAME should not
1880 normally include a dollar sign.
1881
1882 If the specified internal variable does not exist,
1883 the return value is NULL. */
1884
1885 struct internalvar *
1886 lookup_only_internalvar (const char *name)
1887 {
1888 struct internalvar *var;
1889
1890 for (var = internalvars; var; var = var->next)
1891 if (strcmp (var->name, name) == 0)
1892 return var;
1893
1894 return NULL;
1895 }
1896
1897 /* Complete NAME by comparing it to the names of internal variables.
1898 Returns a vector of newly allocated strings, or NULL if no matches
1899 were found. */
1900
1901 VEC (char_ptr) *
1902 complete_internalvar (const char *name)
1903 {
1904 VEC (char_ptr) *result = NULL;
1905 struct internalvar *var;
1906 int len;
1907
1908 len = strlen (name);
1909
1910 for (var = internalvars; var; var = var->next)
1911 if (strncmp (var->name, name, len) == 0)
1912 {
1913 char *r = xstrdup (var->name);
1914
1915 VEC_safe_push (char_ptr, result, r);
1916 }
1917
1918 return result;
1919 }
1920
1921 /* Create an internal variable with name NAME and with a void value.
1922 NAME should not normally include a dollar sign. */
1923
1924 struct internalvar *
1925 create_internalvar (const char *name)
1926 {
1927 struct internalvar *var;
1928
1929 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1930 var->name = concat (name, (char *)NULL);
1931 var->kind = INTERNALVAR_VOID;
1932 var->next = internalvars;
1933 internalvars = var;
1934 return var;
1935 }
1936
/* Create an internal variable with name NAME and register FUNCS as
   the callbacks that value_of_internalvar uses to create a value
   whenever this variable is referenced.  NAME should not normally
   include a dollar sign.  DATA is passed uninterpreted to the FUNCS
   callbacks; if FUNCS->destroy is non-NULL, it is called with DATA
   when the internal variable's contents are destroyed (see
   clear_internalvar).  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
1956
1957 /* See documentation in value.h. */
1958
1959 int
1960 compile_internalvar_to_ax (struct internalvar *var,
1961 struct agent_expr *expr,
1962 struct axs_value *value)
1963 {
1964 if (var->kind != INTERNALVAR_MAKE_VALUE
1965 || var->u.make_value.functions->compile_to_ax == NULL)
1966 return 0;
1967
1968 var->u.make_value.functions->compile_to_ax (var, expr, value,
1969 var->u.make_value.data);
1970 return 1;
1971 }
1972
1973 /* Look up an internal variable with name NAME. NAME should not
1974 normally include a dollar sign.
1975
1976 If the specified internal variable does not exist,
1977 one is created, with a void value. */
1978
1979 struct internalvar *
1980 lookup_internalvar (const char *name)
1981 {
1982 struct internalvar *var;
1983
1984 var = lookup_only_internalvar (name);
1985 if (var)
1986 return var;
1987
1988 return create_internalvar (name);
1989 }
1990
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	/* The target could not supply the variable's value; hand back
	   void rather than a stale number.  */
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a value of the appropriate type for VAR's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* A NULL type means "use GDBARCH's default int".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2079
2080 int
2081 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2082 {
2083 if (var->kind == INTERNALVAR_INTEGER)
2084 {
2085 *result = var->u.integer.val;
2086 return 1;
2087 }
2088
2089 if (var->kind == INTERNALVAR_VALUE)
2090 {
2091 struct type *type = check_typedef (value_type (var->u.value));
2092
2093 if (TYPE_CODE (type) == TYPE_CODE_INT)
2094 {
2095 *result = value_as_long (var->u.value);
2096 return 1;
2097 }
2098 }
2099
2100 return 0;
2101 }
2102
2103 static int
2104 get_internalvar_function (struct internalvar *var,
2105 struct internal_function **result)
2106 {
2107 switch (var->kind)
2108 {
2109 case INTERNALVAR_FUNCTION:
2110 *result = var->u.fn.function;
2111 return 1;
2112
2113 default:
2114 return 0;
2115 }
2116 }
2117
/* Store NEWVAL into the component of internal variable VAR at byte
   OFFSET.  For bitfield components, BITSIZE is non-zero and BITPOS
   gives the bit position; otherwise the component is copied
   wholesale.  Only INTERNALVAR_VALUE variables have components.  */

void
set_internalvar_component (struct internalvar *var, int offset, int bitpos,
			   int bitsize, struct value *newval)
{
  gdb_byte *addr;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);

      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2142
/* Make internal variable VAR hold (a copy of) value VAL.  The new
   contents are fully prepared before the old ones are discarded, so
   that no error () can fire while VAR is half-updated.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2194
/* Make VAR hold the integer L.  The value is typed lazily: a NULL
   type here means value_of_internalvar will use the reading
   architecture's default int type.  */

void
set_internalvar_integer (struct internalvar *var, LONGEST l)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_INTEGER;
  var->u.integer.type = NULL;
  var->u.integer.val = l;
}
2205
/* Make VAR hold a copy of the NUL-terminated string STRING.  */

void
set_internalvar_string (struct internalvar *var, const char *string)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_STRING;
  var->u.string = xstrdup (string);
}
2215
/* Make VAR hold the convenience function F.  VAR becomes the
   canonical name for F.  */

static void
set_internalvar_function (struct internalvar *var, struct internal_function *f)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_FUNCTION;
  var->u.fn.function = f;
  var->u.fn.canonical = 1;
  /* Variables installed here are always the canonical version.  */
}
2227
2228 void
2229 clear_internalvar (struct internalvar *var)
2230 {
2231 /* Clean up old contents. */
2232 switch (var->kind)
2233 {
2234 case INTERNALVAR_VALUE:
2235 value_free (var->u.value);
2236 break;
2237
2238 case INTERNALVAR_STRING:
2239 xfree (var->u.string);
2240 break;
2241
2242 case INTERNALVAR_MAKE_VALUE:
2243 if (var->u.make_value.functions->destroy != NULL)
2244 var->u.make_value.functions->destroy (var->u.make_value.data);
2245 break;
2246
2247 default:
2248 break;
2249 }
2250
2251 /* Reset to void kind. */
2252 var->kind = INTERNALVAR_VOID;
2253 }
2254
/* Return the name of convenience variable VAR, without the leading
   "$".  The string is owned by VAR; the caller must not free it.  */

char *
internalvar_name (struct internalvar *var)
{
  return var->name;
}
2260
2261 static struct internal_function *
2262 create_internal_function (const char *name,
2263 internal_function_fn handler, void *cookie)
2264 {
2265 struct internal_function *ifn = XNEW (struct internal_function);
2266
2267 ifn->name = xstrdup (name);
2268 ifn->handler = handler;
2269 ifn->cookie = cookie;
2270 return ifn;
2271 }
2272
/* Return the name of the internal function referred to by VAL.  VAL
   must be an lval_internalvar whose variable currently holds an
   internal function (both conditions are asserted).  The returned
   string is owned by the function object; do not free it.  */

char *
value_internal_function_name (struct value *val)
{
  struct internal_function *ifn;
  int result;

  gdb_assert (VALUE_LVAL (val) == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
  gdb_assert (result);

  return ifn->name;
}
2285
/* Invoke the internal function that FUNC refers to, passing ARGC
   arguments from ARGV.  FUNC must be an lval_internalvar whose
   variable currently holds an internal function (asserted).  GDBARCH
   and LANGUAGE supply the evaluation context; the cookie registered
   with the function is forwarded to its handler.  Returns the
   handler's result value.  */

struct value *
call_internal_function (struct gdbarch *gdbarch,
			const struct language_defn *language,
			struct value *func, int argc, struct value **argv)
{
  struct internal_function *ifn;
  int result;

  gdb_assert (VALUE_LVAL (func) == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
  gdb_assert (result);

  return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
}
2300
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  COMMAND and FROM_TTY are
   ignored.  */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing.  */
}
2310
2311 /* Clean up if an internal function's command is destroyed. */
2312 static void
2313 function_destroyer (struct cmd_list_element *self, void *ignore)
2314 {
2315 xfree ((char *) self->name);
2316 xfree ((char *) self->doc);
2317 }
2318
2319 /* Add a new internal function. NAME is the name of the function; DOC
2320 is a documentation string describing the function. HANDLER is
2321 called when the function is invoked. COOKIE is an arbitrary
2322 pointer which is passed to HANDLER and is intended for "user
2323 data". */
2324 void
2325 add_internal_function (const char *name, const char *doc,
2326 internal_function_fn handler, void *cookie)
2327 {
2328 struct cmd_list_element *cmd;
2329 struct internal_function *ifn;
2330 struct internalvar *var = lookup_internalvar (name);
2331
2332 ifn = create_internal_function (name, handler, cookie);
2333 set_internalvar_function (var, ifn);
2334
2335 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2336 &functionlist);
2337 cmd->destroyer = function_destroyer;
2338 }
2339
2340 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2341 prevent cycles / duplicates. */
2342
2343 void
2344 preserve_one_value (struct value *value, struct objfile *objfile,
2345 htab_t copied_types)
2346 {
2347 if (TYPE_OBJFILE (value->type) == objfile)
2348 value->type = copy_type_recursive (objfile, value->type, copied_types);
2349
2350 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2351 value->enclosing_type = copy_type_recursive (objfile,
2352 value->enclosing_type,
2353 copied_types);
2354 }
2355
2356 /* Likewise for internal variable VAR. */
2357
2358 static void
2359 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2360 htab_t copied_types)
2361 {
2362 switch (var->kind)
2363 {
2364 case INTERNALVAR_INTEGER:
2365 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2366 var->u.integer.type
2367 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2368 break;
2369
2370 case INTERNALVAR_VALUE:
2371 preserve_one_value (var->u.value, objfile, copied_types);
2372 break;
2373 }
2374 }
2375
2376 /* Update the internal variables and value history when OBJFILE is
2377 discarded; we must copy the types out of the objfile. New global types
2378 will be created for every convenience variable which currently points to
2379 this objfile's types, and the convenience variables will be adjusted to
2380 use the new global types. */
2381
2382 void
2383 preserve_values (struct objfile *objfile)
2384 {
2385 htab_t copied_types;
2386 struct value_history_chunk *cur;
2387 struct internalvar *var;
2388 int i;
2389
2390 /* Create the hash table. We allocate on the objfile's obstack, since
2391 it is soon to be deleted. */
2392 copied_types = create_copied_types_hash (objfile);
2393
2394 for (cur = value_history_chain; cur; cur = cur->next)
2395 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2396 if (cur->values[i])
2397 preserve_one_value (cur->values[i], objfile, copied_types);
2398
2399 for (var = internalvars; var; var = var->next)
2400 preserve_one_internalvar (var, objfile, copied_types);
2401
2402 preserve_ext_lang_values (objfile, copied_types);
2403
2404 htab_delete (copied_types);
2405 }
2406
/* The "show convenience" command: print the name and current value of
   every convenience variable, or a hint about how to create one if
   none are defined.  IGNORE and FROM_TTY are unused.  */

static void
show_convenience (char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {
      volatile struct gdb_exception ex;

      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating the variable may throw (e.g. a make_value callback
	 can fail); catch errors so one bad variable does not abort
	 the whole listing.  */
      TRY_CATCH (ex, RETURN_MASK_ERROR)
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      if (ex.reason < 0)
	fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
2450 \f
2451 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2452
2453 struct value *
2454 value_of_xmethod (struct xmethod_worker *worker)
2455 {
2456 if (worker->value == NULL)
2457 {
2458 struct value *v;
2459
2460 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2461 v->lval = lval_xcallable;
2462 v->location.xm_worker = worker;
2463 v->modifiable = 0;
2464 worker->value = v;
2465 }
2466
2467 return worker->value;
2468 }
2469
/* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value
   METHOD.  ARGV[0] is the object the method is invoked on; the
   remaining ARGC - 1 entries are the method's arguments, so ARGC must
   be at least 1 (asserted).  Returns the xmethod's result value.  */

struct value *
call_xmethod (struct value *method, int argc, struct value **argv)
{
  gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && argc > 0);

  return invoke_xmethod (method->location.xm_worker,
			 argv[0], argv + 1, argc - 1);
}
2481 \f
2482 /* Extract a value as a C number (either long or double).
2483 Knows how to convert fixed values to double, or
2484 floating values to long.
2485 Does not deallocate the value. */
2486
2487 LONGEST
2488 value_as_long (struct value *val)
2489 {
2490 /* This coerces arrays and functions, which is necessary (e.g.
2491 in disassemble_command). It also dereferences references, which
2492 I suspect is the most logical thing to do. */
2493 val = coerce_array (val);
2494 return unpack_long (value_type (val), value_contents (val));
2495 }
2496
2497 DOUBLEST
2498 value_as_double (struct value *val)
2499 {
2500 DOUBLEST foo;
2501 int inv;
2502
2503 foo = unpack_double (value_type (val), value_contents (val), &inv);
2504 if (inv)
2505 error (_("Invalid floating value found in program."));
2506 return foo;
2507 }
2508
/* Extract a value as a C pointer (target address).  Does not
   deallocate the value.  Note that val's type may not actually be a
   pointer; value_as_long handles all the cases.  Function and method
   values short-circuit to their address; other non-pointer values may
   go through gdbarch_integer_to_address.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
     whether we want this to be true eventually. */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant. */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point. For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code. The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own. On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function. This is what
     you'll get if you evaluate an expression like `main'. The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'. This is the challenging conversion
     discussed above. Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor. The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess. If VAL is a
     function, just return its address directly. */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space. For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address. Just assume such architectures handle all
     integer conversions in a single function. */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does. When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed. Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully. In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else. The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it. */

  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Fall back to interpreting the raw contents as an integer, which
     unpack_long also does for pointer and reference types.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2615 \f
2616 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2617 as a long, or as a double, assuming the raw data is described
2618 by type TYPE. Knows how to convert different sizes of values
2619 and can convert between fixed and floating point. We don't assume
2620 any alignment for the raw data. Return value is in host byte order.
2621
2622 If you want functions and arrays to be coerced to pointers, and
2623 references to be dereferenced, call value_as_long() instead.
2624
2625 C++: It is assumed that the front-end has taken care of
2626 all matters concerning pointers to members. A pointer
2627 to member which reaches here is considered to be equivalent
2628 to an INT (or some size). After all, it is only an offset. */
2629
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Peel the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* All integer-like codes: extract respecting signedness.  */
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* The float is truncated toward zero by the implicit
	 DOUBLEST -> LONGEST conversion on return.  */
      return extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part. */
      return decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
	 whether we want this to be true eventually. */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint. */
}
2673
/* Return a double value from the specified type and address.
   INVP points to an int which is set to 0 for valid value,
   1 for invalid value (bad float format).  In either case,
   the returned double is OK to use.  Argument is in target
   format, result is in host format.  */

DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  /* CHECK_TYPEDEF replaces TYPE with its resolved form in place, so
     the reads below see the underlying type.  */
  CHECK_TYPEDEF (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Tell the caller the bits were not a valid float; 0.0 is
	     still safe to use as documented above.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
2733
2734 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2735 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2736 We don't assume any alignment for the raw data. Return value is in
2737 host byte order.
2738
2739 If you want functions and arrays to be coerced to pointers, and
2740 references to be dereferenced, call value_as_address() instead.
2741
2742 C++: It is assumed that the front-end has taken care of
2743 all matters concerning pointers to members. A pointer
2744 to member which reaches here is considered to be equivalent
2745 to an INT (or some size). After all, it is only an offset. */
2746
2747 CORE_ADDR
2748 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2749 {
2750 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2751 whether we want this to be true eventually. */
2752 return unpack_long (type, valaddr);
2753 }
2754
2755 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  The field's location may be recorded either as a physical
   address or as a physical (linkage) name to look up.  If the symbol
   cannot be found at all, an optimized-out value is returned.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  Fall back to the
	     minimal-symbol table.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym, NULL);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2801
2802 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2803 You have to be careful here, since the size of the data area for the value
2804 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2805 than the old enclosing type, you have to allocate more space for the
2806 data. */
2807
2808 void
2809 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2810 {
2811 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2812 val->contents =
2813 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2814
2815 val->enclosing_type = new_encl_type;
2816 }
2817
/* Given a value ARG1 (offset by OFFSET bytes)
   of a struct or union type ARG_TYPE,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  Handles three cases: packed (bitfield)
   members, base-class subobjects, and plain data members.  */

struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;

  CHECK_TYPEDEF (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero. However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly. */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set. If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit. Assume that the address, offset, and embedded offset
	 are sufficiently aligned. */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      if (arg1->optimized_out)
	v = allocate_optimized_out_value (type);
      else
	{
	  v = allocate_value_lazy (type);
	  v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
	  /* Prefer a container-aligned bitpos when the whole bitfield
	     fits in a LONGEST-sized container; otherwise fall back to
	     byte alignment.  */
	  if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	      && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	    v->bitpos = bitpos % container_bitsize;
	  else
	    v->bitpos = bitpos % 8;
	  v->offset = (value_embedded_offset (arg1)
		       + offset
		       + (bitpos - v->bitpos) / 8);
	  set_value_parent (v, arg1);
	  if (!value_lazy (arg1))
	    value_fetch_lazy (v);
	}
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc. */
      int boffset;

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* The optimized_out flag is only set correctly once a lazy value is
	 loaded, having just loaded some lazy values we should check the
	 optimized out case now. */
      if (arg1->optimized_out)
	v = allocate_optimized_out_value (type);
      else
	{
	  /* We special case virtual inheritance here because this
	     requires access to the contents, which we would rather avoid
	     for references to ordinary fields of unavailable values. */
	  if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	    boffset = baseclass_offset (arg_type, fieldno,
					value_contents (arg1),
					value_embedded_offset (arg1),
					value_address (arg1),
					arg1);
	  else
	    boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

	  /* Keep the whole enclosing object, not just the base's
	     span, so virtual bases remain reachable later.  */
	  if (value_lazy (arg1))
	    v = allocate_value_lazy (value_enclosing_type (arg1));
	  else
	    {
	      v = allocate_value (value_enclosing_type (arg1));
	      value_contents_copy_raw (v, 0, arg1, 0,
				       TYPE_LENGTH (value_enclosing_type (arg1)));
	    }
	  v->type = type;
	  v->offset = value_offset (arg1);
	  v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
	}
    }
  else
    {
      /* Plain old data member */
      offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* The optimized_out flag is only set correctly once a lazy value is
	 loaded, having just loaded some lazy values we should check for
	 the optimized out case now. */
      if (arg1->optimized_out)
	v = allocate_optimized_out_value (type);
      else if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   TYPE_LENGTH (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The new value shares ARG1's location (register/frame/etc.).  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
2948
2949 /* Given a value ARG1 of a struct or union type,
2950 extract and return the value of one of its (non-static) fields.
2951 FIELDNO says which field. */
2952
struct value *
value_field (struct value *arg1, int fieldno)
{
  struct type *arg_type = value_type (arg1);

  /* Delegate to the general helper with a zero byte offset.  */
  return value_primitive_field (arg1, 0, fieldno, arg_type);
}
2958
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  Returns NULL if neither kind of
   symbol can be found for the method's physical name.  If ARG1P is
   non-NULL, *ARG1P may be replaced by a re-cast `this' value.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		int offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
  if (sym != NULL)
    {
      /* Full symbol found; MSYM is unused, clear it for safety.  */
      memset (&msym, 0, sizeof (msym));
    }
  else
    {
      gdb_assert (sym == NULL);
      /* Fall back to the minimal symbol table.  */
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  if (sym)
    {
      set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead. */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = get_objfile_arch (objfile);

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
    }

  if (arg1p)
    {
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset; */
    }

  return v;
}
3019
3020 \f
3021
/* Helper function for both unpack_value_bits_as_long and
   unpack_bits_as_long.  See those functions for more details on the
   interface; the only difference is that this function accepts either
   a NULL or a non-NULL ORIGINAL_VALUE.  When ORIGINAL_VALUE is NULL,
   no availability check is performed and *RESULT is always set.  */

static int
unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr,
			     int embedded_offset, int bitpos, int bitsize,
			     const struct value *original_value,
			     LONGEST *result)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  int bytes_read;
  int read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST. */
  CHECK_TYPEDEF (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    bytes_read = TYPE_LENGTH (field_type);

  read_offset = bitpos / 8;

  /* Bail out (returning 0) if any of the requested bits are not
     available in ORIGINAL_VALUE.  */
  if (original_value != NULL
      && !value_bits_available (original_value, embedded_offset + bitpos,
				bitsize))
    return 0;

  val = extract_unsigned_integer (valaddr + embedded_offset + read_offset,
				  bytes_read, byte_order);

  /* Extract bits. See comment above. */

  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend. */

  if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!TYPE_UNSIGNED (field_type))
	{
	  /* Sign bit set: extend by OR-ing in the bits above the
	     field.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  *result = val;
  return 1;
}
3085
3086 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3087 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT.
3088 VALADDR points to the contents of ORIGINAL_VALUE, which must not be
3089 NULL. The bitfield starts at BITPOS bits and contains BITSIZE
3090 bits.
3091
3092 Returns false if the value contents are unavailable, otherwise
3093 returns true, indicating a valid value has been stored in *RESULT.
3094
3095 Extracting bits depends on endianness of the machine. Compute the
3096 number of least significant bits to discard. For big endian machines,
3097 we compute the total number of bits in the anonymous object, subtract
3098 off the bit count from the MSB of the object to the MSB of the
3099 bitfield, then the size of the bitfield, which leaves the LSB discard
3100 count. For little endian machines, the discard count is simply the
3101 number of bits from the LSB of the anonymous object to the LSB of the
3102 bitfield.
3103
3104 If the field is signed, we also do sign extension. */
3105
3106 int
3107 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3108 int embedded_offset, int bitpos, int bitsize,
3109 const struct value *original_value,
3110 LONGEST *result)
3111 {
3112 gdb_assert (original_value != NULL);
3113
3114 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3115 bitpos, bitsize, original_value, result);
3116
3117 }
3118
3119 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3120 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3121 ORIGINAL_VALUE. See unpack_value_bits_as_long for more
3122 details. */
3123
3124 static int
3125 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr,
3126 int embedded_offset, int fieldno,
3127 const struct value *val, LONGEST *result)
3128 {
3129 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3130 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3131 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3132
3133 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3134 bitpos, bitsize, val,
3135 result);
3136 }
3137
3138 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3139 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3140 ORIGINAL_VALUE, which must not be NULL. See
3141 unpack_value_bits_as_long for more details. */
3142
3143 int
3144 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3145 int embedded_offset, int fieldno,
3146 const struct value *val, LONGEST *result)
3147 {
3148 gdb_assert (val != NULL);
3149
3150 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset,
3151 fieldno, val, result);
3152 }
3153
/* Unpack a field FIELDNO of the specified TYPE, from the anonymous
   object at VALADDR.  See unpack_value_bits_as_long for more details.
   This function differs from unpack_value_field_as_long in that it
   operates without a struct value object.  */

LONGEST
unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
{
  LONGEST result;

  /* With a NULL value the helper skips the availability check and
     always stores a result, so the return value can be ignored.  */
  unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result);
  return result;
}
3167
3168 /* Return a new value with type TYPE, which is FIELDNO field of the
3169 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3170 of VAL. If the VAL's contents required to extract the bitfield
3171 from are unavailable, the new value is correspondingly marked as
3172 unavailable. */
3173
3174 struct value *
3175 value_field_bitfield (struct type *type, int fieldno,
3176 const gdb_byte *valaddr,
3177 int embedded_offset, const struct value *val)
3178 {
3179 LONGEST l;
3180
3181 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno,
3182 val, &l))
3183 {
3184 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3185 struct value *retval = allocate_value (field_type);
3186 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type));
3187 return retval;
3188 }
3189 else
3190 {
3191 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l);
3192 }
3193 }
3194
3195 /* Modify the value of a bitfield. ADDR points to a block of memory in
3196 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3197 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3198 indicate which bits (in target bit order) comprise the bitfield.
3199 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3200 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3201
3202 void
3203 modify_field (struct type *type, gdb_byte *addr,
3204 LONGEST fieldval, int bitpos, int bitsize)
3205 {
3206 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3207 ULONGEST oword;
3208 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3209 int bytesize;
3210
3211 /* Normalize BITPOS. */
3212 addr += bitpos / 8;
3213 bitpos %= 8;
3214
3215 /* If a negative fieldval fits in the field in question, chop
3216 off the sign extension bits. */
3217 if ((~fieldval & ~(mask >> 1)) == 0)
3218 fieldval &= mask;
3219
3220 /* Warn if value is too big to fit in the field in question. */
3221 if (0 != (fieldval & ~mask))
3222 {
3223 /* FIXME: would like to include fieldval in the message, but
3224 we don't have a sprintf_longest. */
3225 warning (_("Value does not fit in %d bits."), bitsize);
3226
3227 /* Truncate it, otherwise adjoining fields may be corrupted. */
3228 fieldval &= mask;
3229 }
3230
3231 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3232 false valgrind reports. */
3233
3234 bytesize = (bitpos + bitsize + 7) / 8;
3235 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3236
3237 /* Shifting for bit field depends on endianness of the target machine. */
3238 if (gdbarch_bits_big_endian (get_type_arch (type)))
3239 bitpos = bytesize * 8 - bitpos - bitsize;
3240
3241 oword &= ~(mask << bitpos);
3242 oword |= fieldval << bitpos;
3243
3244 store_unsigned_integer (addr, bytesize, byte_order, oword);
3245 }
3246 \f
3247 /* Pack NUM into BUF using a target format of TYPE. */
3248
3249 void
3250 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3251 {
3252 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3253 int len;
3254
3255 type = check_typedef (type);
3256 len = TYPE_LENGTH (type);
3257
3258 switch (TYPE_CODE (type))
3259 {
3260 case TYPE_CODE_INT:
3261 case TYPE_CODE_CHAR:
3262 case TYPE_CODE_ENUM:
3263 case TYPE_CODE_FLAGS:
3264 case TYPE_CODE_BOOL:
3265 case TYPE_CODE_RANGE:
3266 case TYPE_CODE_MEMBERPTR:
3267 store_signed_integer (buf, len, byte_order, num);
3268 break;
3269
3270 case TYPE_CODE_REF:
3271 case TYPE_CODE_PTR:
3272 store_typed_address (buf, type, (CORE_ADDR) num);
3273 break;
3274
3275 default:
3276 error (_("Unexpected type (%d) encountered for integer constant."),
3277 TYPE_CODE (type));
3278 }
3279 }
3280
3281
3282 /* Pack NUM into BUF using a target format of TYPE. */
3283
3284 static void
3285 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3286 {
3287 int len;
3288 enum bfd_endian byte_order;
3289
3290 type = check_typedef (type);
3291 len = TYPE_LENGTH (type);
3292 byte_order = gdbarch_byte_order (get_type_arch (type));
3293
3294 switch (TYPE_CODE (type))
3295 {
3296 case TYPE_CODE_INT:
3297 case TYPE_CODE_CHAR:
3298 case TYPE_CODE_ENUM:
3299 case TYPE_CODE_FLAGS:
3300 case TYPE_CODE_BOOL:
3301 case TYPE_CODE_RANGE:
3302 case TYPE_CODE_MEMBERPTR:
3303 store_unsigned_integer (buf, len, byte_order, num);
3304 break;
3305
3306 case TYPE_CODE_REF:
3307 case TYPE_CODE_PTR:
3308 store_typed_address (buf, type, (CORE_ADDR) num);
3309 break;
3310
3311 default:
3312 error (_("Unexpected type (%d) encountered "
3313 "for unsigned integer constant."),
3314 TYPE_CODE (type));
3315 }
3316 }
3317
3318
3319 /* Convert C numbers into newly allocated values. */
3320
3321 struct value *
3322 value_from_longest (struct type *type, LONGEST num)
3323 {
3324 struct value *val = allocate_value (type);
3325
3326 pack_long (value_contents_raw (val), type, num);
3327 return val;
3328 }
3329
3330
3331 /* Convert C unsigned numbers into newly allocated values. */
3332
3333 struct value *
3334 value_from_ulongest (struct type *type, ULONGEST num)
3335 {
3336 struct value *val = allocate_value (type);
3337
3338 pack_unsigned_long (value_contents_raw (val), type, num);
3339
3340 return val;
3341 }
3342
3343
3344 /* Create a value representing a pointer of type TYPE to the address
3345 ADDR. */
3346
3347 struct value *
3348 value_from_pointer (struct type *type, CORE_ADDR addr)
3349 {
3350 struct value *val = allocate_value (type);
3351
3352 store_typed_address (value_contents_raw (val),
3353 check_typedef (type), addr);
3354 return val;
3355 }
3356
3357
3358 /* Create a value of type TYPE whose contents come from VALADDR, if it
3359 is non-null, and whose memory address (in the inferior) is
3360 ADDRESS. The type of the created value may differ from the passed
3361 type TYPE. Make sure to retrieve values new type after this call.
3362 Note that TYPE is not passed through resolve_dynamic_type; this is
3363 a special API intended for use only by Ada. */
3364
3365 struct value *
3366 value_from_contents_and_address_unresolved (struct type *type,
3367 const gdb_byte *valaddr,
3368 CORE_ADDR address)
3369 {
3370 struct value *v;
3371
3372 if (valaddr == NULL)
3373 v = allocate_value_lazy (type);
3374 else
3375 v = value_from_contents (type, valaddr);
3376 set_value_address (v, address);
3377 VALUE_LVAL (v) = lval_memory;
3378 return v;
3379 }
3380
3381 /* Create a value of type TYPE whose contents come from VALADDR, if it
3382 is non-null, and whose memory address (in the inferior) is
3383 ADDRESS. The type of the created value may differ from the passed
3384 type TYPE. Make sure to retrieve values new type after this call. */
3385
3386 struct value *
3387 value_from_contents_and_address (struct type *type,
3388 const gdb_byte *valaddr,
3389 CORE_ADDR address)
3390 {
3391 struct type *resolved_type = resolve_dynamic_type (type, address);
3392 struct value *v;
3393
3394 if (valaddr == NULL)
3395 v = allocate_value_lazy (resolved_type);
3396 else
3397 v = value_from_contents (resolved_type, valaddr);
3398 if (TYPE_DATA_LOCATION (resolved_type) != NULL
3399 && TYPE_DATA_LOCATION_KIND (resolved_type) == PROP_CONST)
3400 address = TYPE_DATA_LOCATION_ADDR (resolved_type);
3401 set_value_address (v, address);
3402 VALUE_LVAL (v) = lval_memory;
3403 return v;
3404 }
3405
3406 /* Create a value of type TYPE holding the contents CONTENTS.
3407 The new value is `not_lval'. */
3408
3409 struct value *
3410 value_from_contents (struct type *type, const gdb_byte *contents)
3411 {
3412 struct value *result;
3413
3414 result = allocate_value (type);
3415 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3416 return result;
3417 }
3418
3419 struct value *
3420 value_from_double (struct type *type, DOUBLEST num)
3421 {
3422 struct value *val = allocate_value (type);
3423 struct type *base_type = check_typedef (type);
3424 enum type_code code = TYPE_CODE (base_type);
3425
3426 if (code == TYPE_CODE_FLT)
3427 {
3428 store_typed_floating (value_contents_raw (val), base_type, num);
3429 }
3430 else
3431 error (_("Unexpected type encountered for floating constant."));
3432
3433 return val;
3434 }
3435
3436 struct value *
3437 value_from_decfloat (struct type *type, const gdb_byte *dec)
3438 {
3439 struct value *val = allocate_value (type);
3440
3441 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3442 return val;
3443 }
3444
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  On success, *ENDP is advanced past the consumed text;
   returns NULL (leaving *ENDP untouched) if H does not look like a
   history reference.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  Cast to unsigned char: passing a
     plain (possibly negative) char to the <ctype.h> functions is
     undefined behavior (CERT STR37-C).  */
  for (; isdigit ((unsigned char) h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha ((unsigned char) h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3507
3508 struct value *
3509 coerce_ref_if_computed (const struct value *arg)
3510 {
3511 const struct lval_funcs *funcs;
3512
3513 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3514 return NULL;
3515
3516 if (value_lval_const (arg) != lval_computed)
3517 return NULL;
3518
3519 funcs = value_computed_funcs (arg);
3520 if (funcs->coerce_ref == NULL)
3521 return NULL;
3522
3523 return funcs->coerce_ref (arg);
3524 }
3525
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      struct type *original_type,
			      struct value *original_value)
{
  /* Re-adjust type: the result has the target type of the original
     reference/pointer type.  */
  deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));

  /* Add embedding info, carried over from the original value.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3543
3544 struct value *
3545 coerce_ref (struct value *arg)
3546 {
3547 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3548 struct value *retval;
3549 struct type *enc_type;
3550
3551 retval = coerce_ref_if_computed (arg);
3552 if (retval)
3553 return retval;
3554
3555 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3556 return arg;
3557
3558 enc_type = check_typedef (value_enclosing_type (arg));
3559 enc_type = TYPE_TARGET_TYPE (enc_type);
3560
3561 retval = value_at_lazy (enc_type,
3562 unpack_pointer (value_type (arg),
3563 value_contents (arg)));
3564 enc_type = value_type (retval);
3565 return readjust_indirect_value_type (retval, enc_type,
3566 value_type_arg_tmp, arg);
3567 }
3568
3569 struct value *
3570 coerce_array (struct value *arg)
3571 {
3572 struct type *type;
3573
3574 arg = coerce_ref (arg);
3575 type = check_typedef (value_type (arg));
3576
3577 switch (TYPE_CODE (type))
3578 {
3579 case TYPE_CODE_ARRAY:
3580 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3581 arg = value_coerce_array (arg);
3582 break;
3583 case TYPE_CODE_FUNC:
3584 arg = value_coerce_function (arg);
3585 break;
3586 }
3587 return arg;
3588 }
3589 \f
3590
3591 /* Return the return value convention that will be used for the
3592 specified type. */
3593
3594 enum return_value_convention
3595 struct_return_convention (struct gdbarch *gdbarch,
3596 struct value *function, struct type *value_type)
3597 {
3598 enum type_code code = TYPE_CODE (value_type);
3599
3600 if (code == TYPE_CODE_ERROR)
3601 error (_("Function return type unknown."));
3602
3603 /* Probe the architecture for the return-value convention. */
3604 return gdbarch_return_value (gdbarch, function, value_type,
3605 NULL, NULL, NULL);
3606 }
3607
3608 /* Return true if the function returning the specified type is using
3609 the convention of returning structures in memory (passing in the
3610 address as a hidden first parameter). */
3611
3612 int
3613 using_struct_return (struct gdbarch *gdbarch,
3614 struct value *function, struct type *value_type)
3615 {
3616 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3617 /* A void return value is never in memory. See also corresponding
3618 code in "print_return_value". */
3619 return 0;
3620
3621 return (struct_return_convention (gdbarch, function, value_type)
3622 != RETURN_VALUE_REGISTER_CONVENTION);
3623 }
3624
/* Set the initialized field in a value struct.  STATUS is the new
   flag value; see value_initialized for the reader.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3632
/* Return the initialized field in a value struct, as previously set
   by set_value_initialized.  */

int
value_initialized (struct value *val)
{
  return val->initialized;
}
3640
/* Called only from the value_contents and value_contents_all()
   macros, if the current data for a variable needs to be loaded into
   value_contents(VAL).  Fetches the data from the user's process, and
   clears the lazy flag to indicate that the data in the buffer is
   valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.

   This function returns a value because it is used in the
   value_contents macro as part of an expression, where a void would
   not work.  The value is ignored.  */

int
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A bitfield is fetched by fetching its parent and then unpacking
     the requested bits out of the parent's contents.  */
  if (value_bitsize (val))
    {
      /* To read a lazy bitfield, read the entire enclosing value.  This
	 prevents reading the same block of (possibly volatile) memory once
         per bitfield.  It would be even better to read only the containing
         word, but we have no way to record that just specific bits of a
         value have been fetched.  */
      struct type *type = check_typedef (value_type (val));
      enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
      struct value *parent = value_parent (val);
      LONGEST offset = value_offset (val);
      LONGEST num;

      if (value_lazy (parent))
	value_fetch_lazy (parent);

      if (!value_bits_valid (parent,
			     TARGET_CHAR_BIT * offset + value_bitpos (val),
			     value_bitsize (val)))
	set_value_optimized_out (val, 1);
      else if (!unpack_value_bits_as_long (value_type (val),
					   value_contents_for_printing (parent),
					   offset,
					   value_bitpos (val),
					   value_bitsize (val), parent, &num))
	mark_value_bytes_unavailable (val,
				      value_embedded_offset (val),
				      TYPE_LENGTH (type))	;
      else
	store_signed_integer (value_contents_raw (val), TYPE_LENGTH (type),
			      byte_order, num);
    }
  /* Ordinary memory lvalue: read the bytes from the target.  */
  else if (VALUE_LVAL (val) == lval_memory)
    {
      CORE_ADDR addr = value_address (val);
      struct type *type = check_typedef (value_enclosing_type (val));

      if (TYPE_LENGTH (type))
	read_value_memory (val, 0, value_stack (val),
			   addr, value_contents_all_raw (val),
			   TYPE_LENGTH (type));
    }
  /* Register lvalue: follow the chain of lazy register values down
     the frame stack until a non-lazy (or non-register) value is
     found, then copy its contents.  */
  else if (VALUE_LVAL (val) == lval_register)
    {
      struct frame_info *frame;
      int regnum;
      struct type *type = check_typedef (value_type (val));
      struct value *new_val = val, *mark = value_mark ();

      /* Offsets are not supported here; lazy register values must
	 refer to the entire register.  */
      gdb_assert (value_offset (val) == 0);

      while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
	{
	  struct frame_id frame_id = VALUE_FRAME_ID (new_val);

	  frame = frame_find_by_id (frame_id);
	  regnum = VALUE_REGNUM (new_val);

	  gdb_assert (frame != NULL);

	  /* Convertible register routines are used for multi-register
	     values and for interpretation in different types
	     (e.g. float or int from a double register).  Lazy
	     register values should have the register's natural type,
	     so they do not apply.  */
	  gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
						   regnum, type));

	  new_val = get_frame_register_value (frame, regnum);

	  /* If we get another lazy lval_register value, it means the
	     register is found by reading it from the next frame.
	     get_frame_register_value should never return a value with
	     the frame id pointing to FRAME.  If it does, it means we
	     either have two consecutive frames with the same frame id
	     in the frame chain, or some code is trying to unwind
	     behind get_prev_frame's back (e.g., a frame unwind
	     sniffer trying to unwind), bypassing its validations.  In
	     any case, it should always be an internal error to end up
	     in this situation.  */
	  if (VALUE_LVAL (new_val) == lval_register
	      && value_lazy (new_val)
	      && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
	    internal_error (__FILE__, __LINE__,
			    _("infinite loop while fetching a register"));
	}

      /* If it's still lazy (for instance, a saved register on the
	 stack), fetch it.  */
      if (value_lazy (new_val))
	value_fetch_lazy (new_val);

      /* If the register was not saved, mark it optimized out.  */
      if (value_optimized_out (new_val))
	set_value_optimized_out (val, 1);
      else
	{
	  set_value_lazy (val, 0);
	  value_contents_copy (val, value_embedded_offset (val),
			       new_val, value_embedded_offset (new_val),
			       TYPE_LENGTH (type));
	}

      /* With frame debugging enabled, log what was fetched and from
	 where to gdb_stdlog.  */
      if (frame_debug)
	{
	  struct gdbarch *gdbarch;
	  frame = frame_find_by_id (VALUE_FRAME_ID (val));
	  regnum = VALUE_REGNUM (val);
	  gdbarch = get_frame_arch (frame);

	  fprintf_unfiltered (gdb_stdlog,
			      "{ value_fetch_lazy "
			      "(frame=%d,regnum=%d(%s),...) ",
			      frame_relative_level (frame), regnum,
			      user_reg_map_regnum_to_name (gdbarch, regnum));

	  fprintf_unfiltered (gdb_stdlog, "->");
	  if (value_optimized_out (new_val))
	    {
	      fprintf_unfiltered (gdb_stdlog, " ");
	      val_print_optimized_out (new_val, gdb_stdlog);
	    }
	  else
	    {
	      int i;
	      const gdb_byte *buf = value_contents (new_val);

	      if (VALUE_LVAL (new_val) == lval_register)
		fprintf_unfiltered (gdb_stdlog, " register=%d",
				    VALUE_REGNUM (new_val));
	      else if (VALUE_LVAL (new_val) == lval_memory)
		fprintf_unfiltered (gdb_stdlog, " address=%s",
				    paddress (gdbarch,
					      value_address (new_val)));
	      else
		fprintf_unfiltered (gdb_stdlog, " computed");

	      fprintf_unfiltered (gdb_stdlog, " bytes=");
	      fprintf_unfiltered (gdb_stdlog, "[");
	      for (i = 0; i < register_size (gdbarch, regnum); i++)
		fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	      fprintf_unfiltered (gdb_stdlog, "]");
	    }

	  fprintf_unfiltered (gdb_stdlog, " }\n");
	}

      /* Dispose of the intermediate values.  This prevents
	 watchpoints from trying to watch the saved frame pointer.  */
      value_free_to_mark (mark);
    }
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  /* Don't call value_optimized_out on val, doing so would result in a
     recursive call back to value_fetch_lazy, instead check the
     optimized_out flag directly.  */
  else if (val->optimized_out)
    /* Keep it optimized out.  */;
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  set_value_lazy (val, 0);
  return 0;
}
3827
3828 /* Implementation of the convenience function $_isvoid. */
3829
3830 static struct value *
3831 isvoid_internal_fn (struct gdbarch *gdbarch,
3832 const struct language_defn *language,
3833 void *cookie, int argc, struct value **argv)
3834 {
3835 int ret;
3836
3837 if (argc != 1)
3838 error (_("You must provide one argument for $_isvoid."));
3839
3840 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3841
3842 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3843 }
3844
3845 void
3846 _initialize_values (void)
3847 {
3848 add_cmd ("convenience", no_class, show_convenience, _("\
3849 Debugger convenience (\"$foo\") variables and functions.\n\
3850 Convenience variables are created when you assign them values;\n\
3851 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3852 \n\
3853 A few convenience variables are given values automatically:\n\
3854 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3855 \"$__\" holds the contents of the last address examined with \"x\"."
3856 #ifdef HAVE_PYTHON
3857 "\n\n\
3858 Convenience functions are defined via the Python API."
3859 #endif
3860 ), &showlist);
3861 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3862
3863 add_cmd ("values", no_set_class, show_values, _("\
3864 Elements of value history around item number IDX (or last ten)."),
3865 &showlist);
3866
3867 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3868 Initialize a convenience variable if necessary.\n\
3869 init-if-undefined VARIABLE = EXPRESSION\n\
3870 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3871 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3872 VARIABLE is already initialized."));
3873
3874 add_prefix_cmd ("function", no_class, function_command, _("\
3875 Placeholder command for showing help on convenience functions."),
3876 &functionlist, "function ", 0, &cmdlist);
3877
3878 add_internal_function ("_isvoid", _("\
3879 Check whether an expression is void.\n\
3880 Usage: $_isvoid (expression)\n\
3881 Return 1 if the expression is void, zero otherwise."),
3882 isvoid_internal_fn, NULL);
3883 }
This page took 0.101285 seconds and 3 git commands to generate.