1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include <string.h>
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "command.h"
28 #include "gdbcmd.h"
29 #include "target.h"
30 #include "language.h"
31 #include "demangle.h"
32 #include "doublest.h"
33 #include "regcache.h"
34 #include "block.h"
35 #include "dfp.h"
36 #include "objfiles.h"
37 #include "valprint.h"
38 #include "cli/cli-decode.h"
39 #include "exceptions.h"
40 #include "extension.h"
41 #include <ctype.h>
42 #include "tracepoint.h"
43 #include "cp-abi.h"
44 #include "user-regs.h"
45
46 /* Prototypes for exported functions. */
47
48 void _initialize_values (void);
49
50 /* Definition of a user function. */
51 struct internal_function
52 {
53 /* The name of the function. It is a bit odd to have this in the
54 function itself -- the user might use a differently-named
55 convenience variable to hold the function. */
56 char *name;
57
58 /* The handler. */
59 internal_function_fn handler;
60
61 /* User data for the handler. */
62 void *cookie;
63 };
64
65 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
66
67 struct range
68 {
69 /* Lowest offset in the range. */
70 int offset;
71
72 /* Length of the range. */
73 int length;
74 };
75
76 typedef struct range range_s;
77
78 DEF_VEC_O(range_s);
79
80 /* Returns true if the ranges defined by [offset1, offset1+len1) and
81 [offset2, offset2+len2) overlap. */
82
83 static int
84 ranges_overlap (int offset1, int len1,
85 int offset2, int len2)
86 {
87 ULONGEST h, l;
88
89 l = max (offset1, offset2);
90 h = min (offset1 + len1, offset2 + len2);
91 return (l < h);
92 }
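/* Illustrative sketch: a few hypothetical calls showing the half-open
   overlap rule above.  The offsets are made-up bit offsets.  */

static void
ranges_overlap_examples (void)
{
  gdb_assert (ranges_overlap (0, 8, 4, 8));   /* [0,8) and [4,12) share [4,8).  */
  gdb_assert (!ranges_overlap (0, 8, 8, 4));  /* Contiguous ranges do not overlap.  */
  gdb_assert (ranges_overlap (0, 4, 2, 1));   /* [2,3) lies inside [0,4).  */
}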
93
94 /* Returns true if the first argument is strictly less than the
95 second, useful for VEC_lower_bound. We keep ranges sorted by
96 offset and coalesce overlapping and contiguous ranges, so this just
97 compares the starting offset. */
98
99 static int
100 range_lessthan (const range_s *r1, const range_s *r2)
101 {
102 return r1->offset < r2->offset;
103 }
104
105 /* Returns true if RANGES contains any range that overlaps [OFFSET,
106 OFFSET+LENGTH). */
107
108 static int
109 ranges_contain (VEC(range_s) *ranges, int offset, int length)
110 {
111 range_s what;
112 int i;
113
114 what.offset = offset;
115 what.length = length;
116
117 /* We keep ranges sorted by offset and coalesce overlapping and
118 contiguous ranges, so to check if a range list contains a given
119 range, we can do a binary search for the position the given range
120 would be inserted if we only considered the starting OFFSET of
121 ranges. We call that position I. Since we also have LENGTH to
122 care for (this is a range after all), we need to check if the
123 _previous_ range overlaps the I range. E.g.,
124
125 R
126 |---|
127 |---| |---| |------| ... |--|
128 0 1 2 N
129
130 I=1
131
132 In the case above, the binary search would return `I=1', meaning
133 this OFFSET should be inserted at position 1, and the range
134 currently at position 1 should be pushed further (and become 2).
135 But `0' overlaps with R.
136
137 Then we need to check if R overlaps the range at position I itself.
138 E.g.,
139
140 R
141 |---|
142 |---| |---| |-------| ... |--|
143 0 1 2 N
144
145 I=1
146 */
147
148 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
149
150 if (i > 0)
151 {
152 struct range *bef = VEC_index (range_s, ranges, i - 1);
153
154 if (ranges_overlap (bef->offset, bef->length, offset, length))
155 return 1;
156 }
157
158 if (i < VEC_length (range_s, ranges))
159 {
160 struct range *r = VEC_index (range_s, ranges, i);
161
162 if (ranges_overlap (r->offset, r->length, offset, length))
163 return 1;
164 }
165
166 return 0;
167 }
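/* Illustrative sketch: how a sorted, coalesced range vector answers
   containment queries.  The offsets below are hypothetical bit
   offsets.  */

static void
ranges_contain_example (void)
{
  VEC(range_s) *ranges = NULL;
  range_s r;

  r.offset = 8;
  r.length = 16;			/* One range: [8, 24).  */
  VEC_safe_push (range_s, ranges, &r);

  gdb_assert (ranges_contain (ranges, 0, 9));	 /* [0,9) reaches into [8,24).  */
  gdb_assert (!ranges_contain (ranges, 0, 8));	 /* [0,8) stops just short.  */
  gdb_assert (ranges_contain (ranges, 20, 100)); /* Overlaps the tail of [8,24).  */

  VEC_free (range_s, ranges);
}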
168
169 static struct cmd_list_element *functionlist;
170
171 /* Note that the fields in this structure are arranged to save a bit
172 of memory. */
173
174 struct value
175 {
176 /* Type of value; either not an lval, or one of the various
177 different possible kinds of lval. */
178 enum lval_type lval;
179
180 /* Is it modifiable? Only relevant if lval != not_lval. */
181 unsigned int modifiable : 1;
182
183 /* If zero, contents of this value are in the contents field. If
184 nonzero, contents are in the inferior. If the lval field is lval_memory,
185 the contents are in inferior memory at location.address plus offset.
186 The lval field may also be lval_register.
187
188 WARNING: This field is used by the code which handles watchpoints
189 (see breakpoint.c) to decide whether a particular value can be
190 watched by hardware watchpoints. If the lazy flag is set for
191 some member of a value chain, it is assumed that this member of
192 the chain doesn't need to be watched as part of watching the
193 value itself. This is how GDB avoids watching the entire struct
194 or array when the user wants to watch a single struct member or
195 array element. If you ever change the way lazy flag is set and
196 reset, be sure to consider this use as well! */
197 unsigned int lazy : 1;
198
199 /* If nonzero, this is the value of a variable that does not
200 actually exist in the program. If nonzero, and LVAL is
201 lval_register, this is a register ($pc, $sp, etc., never a
202 program variable) that has not been saved in the frame. All
203 optimized-out values are treated pretty much the same, except
204 registers have a different string representation and related
205 error strings. */
206 unsigned int optimized_out : 1;
207
208 /* If value is a variable, whether it is initialized or not. */
209 unsigned int initialized : 1;
210
211 /* If value is from the stack. If this is set, read_stack will be
212 used instead of read_memory to enable extra caching. */
213 unsigned int stack : 1;
214
215 /* If the value has been released. */
216 unsigned int released : 1;
217
218 /* Register number if the value is from a register. */
219 short regnum;
220
221 /* Location of value (if lval). */
222 union
223 {
224 /* If lval == lval_memory, this is the address in the inferior.
225 If lval == lval_register, this is the byte offset into the
226 registers structure. */
227 CORE_ADDR address;
228
229 /* Pointer to internal variable. */
230 struct internalvar *internalvar;
231
232 /* Pointer to xmethod worker. */
233 struct xmethod_worker *xm_worker;
234
235 /* If lval == lval_computed, this is a set of function pointers
236 to use to access and describe the value, and a closure pointer
237 for them to use. */
238 struct
239 {
240 /* Functions to call. */
241 const struct lval_funcs *funcs;
242
243 /* Closure for those functions to use. */
244 void *closure;
245 } computed;
246 } location;
247
248 /* Describes offset of a value within lval of a structure in bytes.
249 If lval == lval_memory, this is an offset to the address. If
250 lval == lval_register, this is a further offset from
251 location.address within the registers structure. Note also the
252 member embedded_offset below. */
253 int offset;
254
255 /* Only used for bitfields; number of bits contained in them. */
256 int bitsize;
257
258 /* Only used for bitfields; position of start of field. For
259 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
260 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
261 int bitpos;
262
263 /* The number of references to this value. When a value is created,
264 the value chain holds a reference, so REFERENCE_COUNT is 1. If
265 release_value is called, this value is removed from the chain but
266 the caller of release_value now has a reference to this value.
267 The caller must arrange for a call to value_free later. */
268 int reference_count;
269
270 /* Only used for bitfields; the containing value. This allows a
271 single read from the target when displaying multiple
272 bitfields. */
273 struct value *parent;
274
275 /* Frame register value is relative to. This will be described in
276 the lval enum above as "lval_register". */
277 struct frame_id frame_id;
278
279 /* Type of the value. */
280 struct type *type;
281
282 /* If a value represents a C++ object, then the `type' field gives
283 the object's compile-time type. If the object actually belongs
284 to some class derived from `type', perhaps with other base
285 classes and additional members, then `type' is just a subobject
286 of the real thing, and the full object is probably larger than
287 `type' would suggest.
288
289 If `type' is a dynamic class (i.e. one with a vtable), then GDB
290 can actually determine the object's run-time type by looking at
291 the run-time type information in the vtable. When this
292 information is available, we may elect to read in the entire
293 object, for several reasons:
294
295 - When printing the value, the user would probably rather see the
296 full object, not just the limited portion apparent from the
297 compile-time type.
298
299 - If `type' has virtual base classes, then even printing `type'
300 alone may require reaching outside the `type' portion of the
301 object to wherever the virtual base class has been stored.
302
303 When we store the entire object, `enclosing_type' is the run-time
304 type -- the complete object -- and `embedded_offset' is the
305 offset of `type' within that larger type, in bytes. The
306 value_contents() macro takes `embedded_offset' into account, so
307 most GDB code continues to see the `type' portion of the value,
308 just as the inferior would.
309
310 If `type' is a pointer to an object, then `enclosing_type' is a
311 pointer to the object's run-time type, and `pointed_to_offset' is
312 the offset in bytes from the full object to the pointed-to object
313 -- that is, the value `embedded_offset' would have if we followed
314 the pointer and fetched the complete object. (I don't really see
315 the point. Why not just determine the run-time type when you
316 indirect, and avoid the special case? The contents don't matter
317 until you indirect anyway.)
318
319 If we're not doing anything fancy, `enclosing_type' is equal to
320 `type', and `embedded_offset' is zero, so everything works
321 normally. */
322 struct type *enclosing_type;
323 int embedded_offset;
324 int pointed_to_offset;
325
326 /* Values are stored in a chain, so that they can be deleted easily
327 over calls to the inferior. Values assigned to internal
328 variables, put into the value history or exposed to Python are
329 taken off this list. */
330 struct value *next;
331
332 /* Actual contents of the value. Target byte-order. NULL or not
333 valid if lazy is nonzero. */
334 gdb_byte *contents;
335
336 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
337 rather than available, since the common and default case is for a
338 value to be available. This is filled in at value read time. The
339 unavailable ranges are tracked in bits. */
340 VEC(range_s) *unavailable;
341 };
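/* Illustrative sketch of how the type/enclosing_type/embedded_offset
   fields above relate, for a hypothetical C++ object of run-time type
   Derived viewed through a variable declared as Base:

     type            -> Base
     enclosing_type  -> Derived
     embedded_offset -> offset, in bytes, of the Base subobject
                        within the full Derived object

   so value_contents () sees just the Base portion, while
   value_contents_all () sees the whole Derived object.  */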
342
343 int
344 value_bits_available (const struct value *value, int offset, int length)
345 {
346 gdb_assert (!value->lazy);
347
348 return !ranges_contain (value->unavailable, offset, length);
349 }
350
351 int
352 value_bytes_available (const struct value *value, int offset, int length)
353 {
354 return value_bits_available (value,
355 offset * TARGET_CHAR_BIT,
356 length * TARGET_CHAR_BIT);
357 }
358
359 int
360 value_entirely_available (struct value *value)
361 {
362 /* We can only tell whether the whole value is available when we try
363 to read it. */
364 if (value->lazy)
365 value_fetch_lazy (value);
366
367 if (VEC_empty (range_s, value->unavailable))
368 return 1;
369 return 0;
370 }
371
372 int
373 value_entirely_unavailable (struct value *value)
374 {
375 /* We can only tell whether the whole value is available when we try
376 to read it. */
377 if (value->lazy)
378 value_fetch_lazy (value);
379
380 if (VEC_length (range_s, value->unavailable) == 1)
381 {
382 struct range *t = VEC_index (range_s, value->unavailable, 0);
383
384 if (t->offset == 0
385 && t->length == (TARGET_CHAR_BIT
386 * TYPE_LENGTH (value_enclosing_type (value))))
387 return 1;
388 }
389
390 return 0;
391 }
392
393 void
394 mark_value_bits_unavailable (struct value *value, int offset, int length)
395 {
396 range_s newr;
397 int i;
398
399 /* Insert the range sorted. If there's overlap or the new range
400 would be contiguous with an existing range, merge. */
401
402 newr.offset = offset;
403 newr.length = length;
404
405 /* Do a binary search for the position the given range would be
406 inserted if we only considered the starting OFFSET of ranges.
407 Call that position I. Since we also have LENGTH to care for
408 (this is a range after all), we need to check if the _previous_
409 range overlaps the I range. E.g., calling R the new range:
410
411 #1 - overlaps with previous
412
413 R
414 |-...-|
415 |---| |---| |------| ... |--|
416 0 1 2 N
417
418 I=1
419
420 In the case #1 above, the binary search would return `I=1',
421 meaning this OFFSET should be inserted at position 1, and the range
422 currently at position 1 should be pushed further (and become 2).
423 But note that `0' overlaps with R, so we want to merge them.
424
425 A similar consideration needs to be taken if the new range would
426 be contiguous with the previous range:
427
428 #2 - contiguous with previous
429
430 R
431 |-...-|
432 |--| |---| |------| ... |--|
433 0 1 2 N
434
435 I=1
436
437 If there's no overlap with the previous range, as in:
438
439 #3 - not overlapping and not contiguous
440
441 R
442 |-...-|
443 |--| |---| |------| ... |--|
444 0 1 2 N
445
446 I=1
447
448 or if I is 0:
449
450 #4 - R is the range with lowest offset
451
452 R
453 |-...-|
454 |--| |---| |------| ... |--|
455 0 1 2 N
456
457 I=0
458
459 ... we just push the new range to I.
460
461 All the 4 cases above need to consider that the new range may
462 also overlap several of the ranges that follow, or that R may be
463 contiguous with the following range, and merge. E.g.,
464
465 #5 - overlapping following ranges
466
467 R
468 |------------------------|
469 |--| |---| |------| ... |--|
470 0 1 2 N
471
472 I=0
473
474 or:
475
476 R
477 |-------|
478 |--| |---| |------| ... |--|
479 0 1 2 N
480
481 I=1
482
483 */
484
485 i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
486 if (i > 0)
487 {
488 struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
489
490 if (ranges_overlap (bef->offset, bef->length, offset, length))
491 {
492 /* #1 */
493 ULONGEST l = min (bef->offset, offset);
494 ULONGEST h = max (bef->offset + bef->length, offset + length);
495
496 bef->offset = l;
497 bef->length = h - l;
498 i--;
499 }
500 else if (offset == bef->offset + bef->length)
501 {
502 /* #2 */
503 bef->length += length;
504 i--;
505 }
506 else
507 {
508 /* #3 */
509 VEC_safe_insert (range_s, value->unavailable, i, &newr);
510 }
511 }
512 else
513 {
514 /* #4 */
515 VEC_safe_insert (range_s, value->unavailable, i, &newr);
516 }
517
518 /* Check whether the ranges following the one we've just added or
519 touched can be folded in (#5 above). */
520 if (i + 1 < VEC_length (range_s, value->unavailable))
521 {
522 struct range *t;
523 struct range *r;
524 int removed = 0;
525 int next = i + 1;
526
527 /* Get the range we just touched. */
528 t = VEC_index (range_s, value->unavailable, i);
529 removed = 0;
530
531 i = next;
532 for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
533 if (r->offset <= t->offset + t->length)
534 {
535 ULONGEST l, h;
536
537 l = min (t->offset, r->offset);
538 h = max (t->offset + t->length, r->offset + r->length);
539
540 t->offset = l;
541 t->length = h - l;
542
543 removed++;
544 }
545 else
546 {
547 /* If we couldn't merge this one, we won't be able to
548 merge following ones either, since the ranges are
549 always sorted by OFFSET. */
550 break;
551 }
552
553 if (removed != 0)
554 VEC_block_remove (range_s, value->unavailable, next, removed);
555 }
556 }
557
558 void
559 mark_value_bytes_unavailable (struct value *value, int offset, int length)
560 {
561 mark_value_bits_unavailable (value,
562 offset * TARGET_CHAR_BIT,
563 length * TARGET_CHAR_BIT);
564 }
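/* Illustrative sketch: how successive markings end up coalesced into
   a single range, exercising several of the cases described above.
   TYPE is a hypothetical type at least four bytes long; offsets are
   in bits.  */

static void
mark_unavailable_coalescing_example (struct type *type)
{
  struct value *val = allocate_value (type);

  mark_value_bits_unavailable (val, 0, 8);	/* [0,8)  */
  mark_value_bits_unavailable (val, 16, 8);	/* [0,8) [16,24)  */
  mark_value_bits_unavailable (val, 8, 8);	/* Coalesces to [0,24).  */

  gdb_assert (!value_bits_available (val, 0, 24));
  gdb_assert (value_bits_available (val, 24, 8));
}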
565
566 /* Find the first range in RANGES that overlaps the range defined by
567 OFFSET and LENGTH, starting at element POS in the RANGES vector.
568 Returns the index into RANGES where such an overlapping range was
569 found, or -1 if none was found. */
570
571 static int
572 find_first_range_overlap (VEC(range_s) *ranges, int pos,
573 int offset, int length)
574 {
575 range_s *r;
576 int i;
577
578 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
579 if (ranges_overlap (r->offset, r->length, offset, length))
580 return i;
581
582 return -1;
583 }
584
585 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
586 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
587 return non-zero.
588
589 It must always be the case that:
590 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
591
592 It is assumed that memory can be accessed from:
593 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
594 to:
595 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
596 / TARGET_CHAR_BIT) */
597 static int
598 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
599 const gdb_byte *ptr2, size_t offset2_bits,
600 size_t length_bits)
601 {
602 gdb_assert (offset1_bits % TARGET_CHAR_BIT
603 == offset2_bits % TARGET_CHAR_BIT);
604
605 if (offset1_bits % TARGET_CHAR_BIT != 0)
606 {
607 size_t bits;
608 gdb_byte mask, b1, b2;
609
610 /* The offset from the base pointers PTR1 and PTR2 is not a complete
611 number of bytes. A number of bits up to either the next exact
612 byte boundary, or LENGTH_BITS (whichever is sooner) will be
613 compared. */
614 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
615 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
616 mask = (1 << bits) - 1;
617
618 if (length_bits < bits)
619 {
620 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
621 bits = length_bits;
622 }
623
624 /* Now load the two bytes and mask off the bits we care about. */
625 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
626 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
627
628 if (b1 != b2)
629 return 1;
630
631 /* Now update the length and offsets to take account of the bits
632 we've just compared. */
633 length_bits -= bits;
634 offset1_bits += bits;
635 offset2_bits += bits;
636 }
637
638 if (length_bits % TARGET_CHAR_BIT != 0)
639 {
640 size_t bits;
641 size_t o1, o2;
642 gdb_byte mask, b1, b2;
643
644 /* The length is not an exact number of bytes. After the previous
645 IF block, the offsets are byte aligned, or the
646 length is zero (in which case this code is not reached). Compare
647 a number of bits at the end of the region, starting from an exact
648 byte boundary. */
649 bits = length_bits % TARGET_CHAR_BIT;
650 o1 = offset1_bits + length_bits - bits;
651 o2 = offset2_bits + length_bits - bits;
652
653 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
654 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
655
656 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
657 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
658
659 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
660 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
661
662 if (b1 != b2)
663 return 1;
664
665 length_bits -= bits;
666 }
667
668 if (length_bits > 0)
669 {
670 /* We've now taken care of any stray "bits" at the start or end of
671 the region to compare; the remainder can be covered with a simple
672 memcmp. */
673 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
674 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
675 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
676
677 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
678 ptr2 + offset2_bits / TARGET_CHAR_BIT,
679 length_bits / TARGET_CHAR_BIT);
680 }
681
682 /* Length is zero, regions match. */
683 return 0;
684 }
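/* Illustrative sketch: as coded above, bit offsets within a byte
   count from the most significant bit, so these hypothetical buffers
   (which differ only in the least significant bit of their second
   byte) compare equal until that last bit is included.  Assumes
   TARGET_CHAR_BIT == 8.  */

static void
memcmp_with_bit_offsets_example (void)
{
  static const gdb_byte a[] = { 0xff, 0x0f };
  static const gdb_byte b[] = { 0xff, 0x0e };

  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 15) == 0);
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 16) != 0);
  gdb_assert (memcmp_with_bit_offsets (a, 15, b, 15, 1) != 0);
}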
685
686 /* Helper function for value_available_contents_eq. The only difference is
687 that this function is bit rather than byte based.
688
689 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits with
690 LENGTH bits of VAL2's contents starting at OFFSET2 bits. Return true
691 if the available bits match. */
692
693 static int
694 value_available_contents_bits_eq (const struct value *val1, int offset1,
695 const struct value *val2, int offset2,
696 int length)
697 {
698 int idx1 = 0, idx2 = 0;
699
700 /* See function description in value.h. */
701 gdb_assert (!val1->lazy && !val2->lazy);
702
703 while (length > 0)
704 {
705 range_s *r1, *r2;
706 ULONGEST l1, h1;
707 ULONGEST l2, h2;
708
709 idx1 = find_first_range_overlap (val1->unavailable, idx1,
710 offset1, length);
711 idx2 = find_first_range_overlap (val2->unavailable, idx2,
712 offset2, length);
713
714 /* The usual case is for both values to be completely available. */
715 if (idx1 == -1 && idx2 == -1)
716 return (memcmp_with_bit_offsets (val1->contents, offset1,
717 val2->contents, offset2,
718 length) == 0);
719 /* The contents can only match if the available set matches as
720 well. */
721 else if (idx1 == -1 || idx2 == -1)
722 return 0;
723
724 gdb_assert (idx1 != -1 && idx2 != -1);
725
726 r1 = VEC_index (range_s, val1->unavailable, idx1);
727 r2 = VEC_index (range_s, val2->unavailable, idx2);
728
729 /* Get the unavailable windows intersected by the incoming
730 ranges. The first and last ranges that overlap the argument
731 range may be wider than said incoming argument ranges. */
732 l1 = max (offset1, r1->offset);
733 h1 = min (offset1 + length, r1->offset + r1->length);
734
735 l2 = max (offset2, r2->offset);
736 h2 = min (offset2 + length, r2->offset + r2->length);
737
738 /* Make them relative to the respective start offsets, so we can
739 compare them for equality. */
740 l1 -= offset1;
741 h1 -= offset1;
742
743 l2 -= offset2;
744 h2 -= offset2;
745
746 /* Different availability, no match. */
747 if (l1 != l2 || h1 != h2)
748 return 0;
749
750 /* Compare the _available_ contents. */
751 if (memcmp_with_bit_offsets (val1->contents, offset1,
752 val2->contents, offset2, l1) != 0)
753 return 0;
754
755 length -= h1;
756 offset1 += h1;
757 offset2 += h1;
758 }
759
760 return 1;
761 }
762
763 int
764 value_available_contents_eq (const struct value *val1, int offset1,
765 const struct value *val2, int offset2,
766 int length)
767 {
768 return value_available_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
769 val2, offset2 * TARGET_CHAR_BIT,
770 length * TARGET_CHAR_BIT);
771 }
772
773 /* Prototypes for local functions. */
774
775 static void show_values (char *, int);
776
777 static void show_convenience (char *, int);
778
779
780 /* The value-history records all the values printed
781 by print commands during this session. Each chunk
782 records 60 consecutive values. The first chunk on
783 the chain records the most recent values.
784 The total number of values is in value_history_count. */
785
786 #define VALUE_HISTORY_CHUNK 60
787
788 struct value_history_chunk
789 {
790 struct value_history_chunk *next;
791 struct value *values[VALUE_HISTORY_CHUNK];
792 };
793
794 /* Chain of chunks now in use. */
795
796 static struct value_history_chunk *value_history_chain;
797
798 static int value_history_count; /* Abs number of last entry stored. */
799
800 \f
801 /* List of all value objects currently allocated
802 (except for those released by calls to release_value).
803 This is so they can be freed after each command. */
804
805 static struct value *all_values;
806
807 /* Allocate a lazy value for type TYPE. Its actual content is
808 "lazily" allocated too: the content field of the return value is
809 NULL; it will be allocated when it is fetched from the target. */
810
811 struct value *
812 allocate_value_lazy (struct type *type)
813 {
814 struct value *val;
815
816 /* Call check_typedef on our type to make sure that, if TYPE
817 is a TYPE_CODE_TYPEDEF, its length is set to the length
818 of the target type instead of zero. However, we do not
819 replace the typedef type by the target type, because we want
820 to keep the typedef in order to be able to set the VAL's type
821 description correctly. */
822 check_typedef (type);
823
824 val = (struct value *) xzalloc (sizeof (struct value));
825 val->contents = NULL;
826 val->next = all_values;
827 all_values = val;
828 val->type = type;
829 val->enclosing_type = type;
830 VALUE_LVAL (val) = not_lval;
831 val->location.address = 0;
832 VALUE_FRAME_ID (val) = null_frame_id;
833 val->offset = 0;
834 val->bitpos = 0;
835 val->bitsize = 0;
836 VALUE_REGNUM (val) = -1;
837 val->lazy = 1;
838 val->optimized_out = 0;
839 val->embedded_offset = 0;
840 val->pointed_to_offset = 0;
841 val->modifiable = 1;
842 val->initialized = 1; /* Default to initialized. */
843
844 /* Values start out on the all_values chain. */
845 val->reference_count = 1;
846
847 return val;
848 }
849
850 /* Allocate the contents of VAL if it has not been allocated yet. */
851
852 static void
853 allocate_value_contents (struct value *val)
854 {
855 if (!val->contents)
856 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
857 }
858
859 /* Allocate a value and its contents for type TYPE. */
860
861 struct value *
862 allocate_value (struct type *type)
863 {
864 struct value *val = allocate_value_lazy (type);
865
866 allocate_value_contents (val);
867 val->lazy = 0;
868 return val;
869 }
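/* Illustrative sketch: the two allocators above differ only in
   whether the contents buffer exists yet.  TYPE is a hypothetical
   type supplied by the caller.  */

static void
allocate_value_example (struct type *type)
{
  struct value *val = allocate_value (type);	    /* Contents allocated and zeroed now.  */
  struct value *lazy = allocate_value_lazy (type);  /* Contents deferred until fetched.  */

  gdb_assert (!value_lazy (val));
  gdb_assert (value_lazy (lazy));

  /* Both stay on the all_values chain and are reclaimed by
     free_all_values after the command finishes.  */
}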
870
871 /* Allocate a value that has the correct length
872 for COUNT repetitions of type TYPE. */
873
874 struct value *
875 allocate_repeat_value (struct type *type, int count)
876 {
877 int low_bound = current_language->string_lower_bound; /* ??? */
878 /* FIXME-type-allocation: need a way to free this type when we are
879 done with it. */
880 struct type *array_type
881 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
882
883 return allocate_value (array_type);
884 }
885
886 struct value *
887 allocate_computed_value (struct type *type,
888 const struct lval_funcs *funcs,
889 void *closure)
890 {
891 struct value *v = allocate_value_lazy (type);
892
893 VALUE_LVAL (v) = lval_computed;
894 v->location.computed.funcs = funcs;
895 v->location.computed.closure = closure;
896
897 return v;
898 }
899
900 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
901
902 struct value *
903 allocate_optimized_out_value (struct type *type)
904 {
905 struct value *retval = allocate_value_lazy (type);
906
907 set_value_optimized_out (retval, 1);
908 /* FIXME: we should be able to avoid allocating the value's contents
909 buffer, but value_available_contents_bits_eq can't handle
910 that. */
911 /* set_value_lazy (retval, 0); */
912 return retval;
913 }
914
915 /* Accessor methods. */
916
917 struct value *
918 value_next (struct value *value)
919 {
920 return value->next;
921 }
922
923 struct type *
924 value_type (const struct value *value)
925 {
926 return value->type;
927 }
928 void
929 deprecated_set_value_type (struct value *value, struct type *type)
930 {
931 value->type = type;
932 }
933
934 int
935 value_offset (const struct value *value)
936 {
937 return value->offset;
938 }
939 void
940 set_value_offset (struct value *value, int offset)
941 {
942 value->offset = offset;
943 }
944
945 int
946 value_bitpos (const struct value *value)
947 {
948 return value->bitpos;
949 }
950 void
951 set_value_bitpos (struct value *value, int bit)
952 {
953 value->bitpos = bit;
954 }
955
956 int
957 value_bitsize (const struct value *value)
958 {
959 return value->bitsize;
960 }
961 void
962 set_value_bitsize (struct value *value, int bit)
963 {
964 value->bitsize = bit;
965 }
966
967 struct value *
968 value_parent (struct value *value)
969 {
970 return value->parent;
971 }
972
973 /* See value.h. */
974
975 void
976 set_value_parent (struct value *value, struct value *parent)
977 {
978 struct value *old = value->parent;
979
980 value->parent = parent;
981 if (parent != NULL)
982 value_incref (parent);
983 value_free (old);
984 }
985
986 gdb_byte *
987 value_contents_raw (struct value *value)
988 {
989 allocate_value_contents (value);
990 return value->contents + value->embedded_offset;
991 }
992
993 gdb_byte *
994 value_contents_all_raw (struct value *value)
995 {
996 allocate_value_contents (value);
997 return value->contents;
998 }
999
1000 struct type *
1001 value_enclosing_type (struct value *value)
1002 {
1003 return value->enclosing_type;
1004 }
1005
1006 /* Look at value.h for description. */
1007
1008 struct type *
1009 value_actual_type (struct value *value, int resolve_simple_types,
1010 int *real_type_found)
1011 {
1012 struct value_print_options opts;
1013 struct type *result;
1014
1015 get_user_print_options (&opts);
1016
1017 if (real_type_found)
1018 *real_type_found = 0;
1019 result = value_type (value);
1020 if (opts.objectprint)
1021 {
1022 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1023 fetch its rtti type. */
1024 if ((TYPE_CODE (result) == TYPE_CODE_PTR
1025 || TYPE_CODE (result) == TYPE_CODE_REF)
1026 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1027 == TYPE_CODE_STRUCT)
1028 {
1029 struct type *real_type;
1030
1031 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1032 if (real_type)
1033 {
1034 if (real_type_found)
1035 *real_type_found = 1;
1036 result = real_type;
1037 }
1038 }
1039 else if (resolve_simple_types)
1040 {
1041 if (real_type_found)
1042 *real_type_found = 1;
1043 result = value_enclosing_type (value);
1044 }
1045 }
1046
1047 return result;
1048 }
1049
1050 void
1051 error_value_optimized_out (void)
1052 {
1053 error (_("value has been optimized out"));
1054 }
1055
1056 static void
1057 require_not_optimized_out (const struct value *value)
1058 {
1059 if (value->optimized_out)
1060 {
1061 if (value->lval == lval_register)
1062 error (_("register has not been saved in frame"));
1063 else
1064 error_value_optimized_out ();
1065 }
1066 }
1067
1068 static void
1069 require_available (const struct value *value)
1070 {
1071 if (!VEC_empty (range_s, value->unavailable))
1072 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1073 }
1074
1075 const gdb_byte *
1076 value_contents_for_printing (struct value *value)
1077 {
1078 if (value->lazy)
1079 value_fetch_lazy (value);
1080 return value->contents;
1081 }
1082
1083 const gdb_byte *
1084 value_contents_for_printing_const (const struct value *value)
1085 {
1086 gdb_assert (!value->lazy);
1087 return value->contents;
1088 }
1089
1090 const gdb_byte *
1091 value_contents_all (struct value *value)
1092 {
1093 const gdb_byte *result = value_contents_for_printing (value);
1094 require_not_optimized_out (value);
1095 require_available (value);
1096 return result;
1097 }
1098
1099 /* Copy LENGTH bytes of SRC value's (all) contents
1100 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1101 contents, starting at DST_OFFSET. If unavailable contents are
1102 being copied from SRC, the corresponding DST contents are marked
1103 unavailable accordingly. Neither DST nor SRC may be lazy
1104 values.
1105
1106 It is assumed the contents of DST in the [DST_OFFSET,
1107 DST_OFFSET+LENGTH) range are wholly available. */
1108
1109 void
1110 value_contents_copy_raw (struct value *dst, int dst_offset,
1111 struct value *src, int src_offset, int length)
1112 {
1113 range_s *r;
1114 int i;
1115 int src_bit_offset, dst_bit_offset, bit_length;
1116
1117 /* A lazy DST would make this copy operation useless, since as
1118 soon as DST's contents were un-lazied (by a later value_contents
1119 call, say), the contents would be overwritten. A lazy SRC would
1120 mean we'd be copying garbage. */
1121 gdb_assert (!dst->lazy && !src->lazy);
1122
1123 /* The overwritten DST range gets unavailability ORed in, not
1124 replaced. Make sure to remember to implement replacing if it
1125 turns out actually necessary. */
1126 gdb_assert (value_bytes_available (dst, dst_offset, length));
1127
1128 /* Copy the data. */
1129 memcpy (value_contents_all_raw (dst) + dst_offset,
1130 value_contents_all_raw (src) + src_offset,
1131 length);
1132
1133 /* Copy the meta-data, adjusted. */
1134 src_bit_offset = src_offset * TARGET_CHAR_BIT;
1135 dst_bit_offset = dst_offset * TARGET_CHAR_BIT;
1136 bit_length = length * TARGET_CHAR_BIT;
1137 for (i = 0; VEC_iterate (range_s, src->unavailable, i, r); i++)
1138 {
1139 ULONGEST h, l;
1140
1141 l = max (r->offset, src_bit_offset);
1142 h = min (r->offset + r->length, src_bit_offset + bit_length);
1143
1144 if (l < h)
1145 mark_value_bits_unavailable (dst,
1146 dst_bit_offset + (l - src_bit_offset),
1147 h - l);
1148 }
1149 }
1150
1151 /* Copy LENGTH bytes of SRC value's (all) contents
1152 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1153 (all) contents, starting at DST_OFFSET. If unavailable contents
1154 are being copied from SRC, the corresponding DST contents are
1155 marked unavailable accordingly. DST must not be lazy. If SRC is
1156 lazy, it will be fetched now. If SRC is not valid (is optimized
1157 out), an error is thrown.
1158
1159 It is assumed the contents of DST in the [DST_OFFSET,
1160 DST_OFFSET+LENGTH) range are wholly available. */
1161
1162 void
1163 value_contents_copy (struct value *dst, int dst_offset,
1164 struct value *src, int src_offset, int length)
1165 {
1166 require_not_optimized_out (src);
1167
1168 if (src->lazy)
1169 value_fetch_lazy (src);
1170
1171 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1172 }
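/* Illustrative sketch: unavailability is carried across the copy.
   TYPE is a hypothetical type at least eight bytes long.  */

static void
value_contents_copy_example (struct type *type)
{
  struct value *src = allocate_value (type);
  struct value *dst = allocate_value (type);

  /* Pretend the target could not supply SRC's first four bytes.  */
  mark_value_bytes_unavailable (src, 0, 4);

  value_contents_copy (dst, 0, src, 0, 8);

  gdb_assert (!value_bytes_available (dst, 0, 4));
  gdb_assert (value_bytes_available (dst, 4, 4));
}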
1173
1174 int
1175 value_lazy (struct value *value)
1176 {
1177 return value->lazy;
1178 }
1179
1180 void
1181 set_value_lazy (struct value *value, int val)
1182 {
1183 value->lazy = val;
1184 }
1185
1186 int
1187 value_stack (struct value *value)
1188 {
1189 return value->stack;
1190 }
1191
1192 void
1193 set_value_stack (struct value *value, int val)
1194 {
1195 value->stack = val;
1196 }
1197
1198 const gdb_byte *
1199 value_contents (struct value *value)
1200 {
1201 const gdb_byte *result = value_contents_writeable (value);
1202 require_not_optimized_out (value);
1203 require_available (value);
1204 return result;
1205 }
1206
1207 gdb_byte *
1208 value_contents_writeable (struct value *value)
1209 {
1210 if (value->lazy)
1211 value_fetch_lazy (value);
1212 return value_contents_raw (value);
1213 }
1214
1215 int
1216 value_optimized_out (struct value *value)
1217 {
1218 /* We can only know if a value is optimized out once we have tried to
1219 fetch it. */
1220 if (!value->optimized_out && value->lazy)
1221 value_fetch_lazy (value);
1222
1223 return value->optimized_out;
1224 }
1225
1226 int
1227 value_optimized_out_const (const struct value *value)
1228 {
1229 return value->optimized_out;
1230 }
1231
1232 void
1233 set_value_optimized_out (struct value *value, int val)
1234 {
1235 value->optimized_out = val;
1236 }
1237
1238 int
1239 value_entirely_optimized_out (const struct value *value)
1240 {
1241 if (!value->optimized_out)
1242 return 0;
1243 if (value->lval != lval_computed
1244 || !value->location.computed.funcs->check_any_valid)
1245 return 1;
1246 return !value->location.computed.funcs->check_any_valid (value);
1247 }
1248
1249 int
1250 value_bits_valid (const struct value *value, int offset, int length)
1251 {
1252 if (!value->optimized_out)
1253 return 1;
1254 if (value->lval != lval_computed
1255 || !value->location.computed.funcs->check_validity)
1256 return 0;
1257 return value->location.computed.funcs->check_validity (value, offset,
1258 length);
1259 }
1260
1261 int
1262 value_bits_synthetic_pointer (const struct value *value,
1263 int offset, int length)
1264 {
1265 if (value->lval != lval_computed
1266 || !value->location.computed.funcs->check_synthetic_pointer)
1267 return 0;
1268 return value->location.computed.funcs->check_synthetic_pointer (value,
1269 offset,
1270 length);
1271 }
1272
1273 int
1274 value_embedded_offset (struct value *value)
1275 {
1276 return value->embedded_offset;
1277 }
1278
1279 void
1280 set_value_embedded_offset (struct value *value, int val)
1281 {
1282 value->embedded_offset = val;
1283 }
1284
1285 int
1286 value_pointed_to_offset (struct value *value)
1287 {
1288 return value->pointed_to_offset;
1289 }
1290
1291 void
1292 set_value_pointed_to_offset (struct value *value, int val)
1293 {
1294 value->pointed_to_offset = val;
1295 }
1296
1297 const struct lval_funcs *
1298 value_computed_funcs (const struct value *v)
1299 {
1300 gdb_assert (value_lval_const (v) == lval_computed);
1301
1302 return v->location.computed.funcs;
1303 }
1304
1305 void *
1306 value_computed_closure (const struct value *v)
1307 {
1308 gdb_assert (v->lval == lval_computed);
1309
1310 return v->location.computed.closure;
1311 }
1312
1313 enum lval_type *
1314 deprecated_value_lval_hack (struct value *value)
1315 {
1316 return &value->lval;
1317 }
1318
1319 enum lval_type
1320 value_lval_const (const struct value *value)
1321 {
1322 return value->lval;
1323 }
1324
1325 CORE_ADDR
1326 value_address (const struct value *value)
1327 {
1328 if (value->lval == lval_internalvar
1329 || value->lval == lval_internalvar_component
1330 || value->lval == lval_xcallable)
1331 return 0;
1332 if (value->parent != NULL)
1333 return value_address (value->parent) + value->offset;
1334 else
1335 return value->location.address + value->offset;
1336 }
1337
1338 CORE_ADDR
1339 value_raw_address (struct value *value)
1340 {
1341 if (value->lval == lval_internalvar
1342 || value->lval == lval_internalvar_component
1343 || value->lval == lval_xcallable)
1344 return 0;
1345 return value->location.address;
1346 }
1347
1348 void
1349 set_value_address (struct value *value, CORE_ADDR addr)
1350 {
1351 gdb_assert (value->lval != lval_internalvar
1352 && value->lval != lval_internalvar_component
1353 && value->lval != lval_xcallable);
1354 value->location.address = addr;
1355 }
1356
1357 struct internalvar **
1358 deprecated_value_internalvar_hack (struct value *value)
1359 {
1360 return &value->location.internalvar;
1361 }
1362
1363 struct frame_id *
1364 deprecated_value_frame_id_hack (struct value *value)
1365 {
1366 return &value->frame_id;
1367 }
1368
1369 short *
1370 deprecated_value_regnum_hack (struct value *value)
1371 {
1372 return &value->regnum;
1373 }
1374
1375 int
1376 deprecated_value_modifiable (struct value *value)
1377 {
1378 return value->modifiable;
1379 }
1380 \f
1381 /* Return a mark in the value chain. All values allocated after the
1382 mark is obtained (except for those released) are subject to being freed
1383 if a subsequent value_free_to_mark is passed the mark. */
1384 struct value *
1385 value_mark (void)
1386 {
1387 return all_values;
1388 }
1389
1390 /* Take a reference to VAL. VAL will not be deallocated until all
1391 references are released. */
1392
1393 void
1394 value_incref (struct value *val)
1395 {
1396 val->reference_count++;
1397 }
1398
1399 /* Release a reference to VAL, which was acquired with value_incref.
1400 This function is also called to deallocate values from the value
1401 chain. */
1402
1403 void
1404 value_free (struct value *val)
1405 {
1406 if (val)
1407 {
1408 gdb_assert (val->reference_count > 0);
1409 val->reference_count--;
1410 if (val->reference_count > 0)
1411 return;
1412
1413 /* If there's an associated parent value, drop our reference to
1414 it. */
1415 if (val->parent != NULL)
1416 value_free (val->parent);
1417
1418 if (VALUE_LVAL (val) == lval_computed)
1419 {
1420 const struct lval_funcs *funcs = val->location.computed.funcs;
1421
1422 if (funcs->free_closure)
1423 funcs->free_closure (val);
1424 }
1425 else if (VALUE_LVAL (val) == lval_xcallable)
1426 free_xmethod_worker (val->location.xm_worker);
1427
1428 xfree (val->contents);
1429 VEC_free (range_s, val->unavailable);
1430 }
1431 xfree (val);
1432 }
1433
1434 /* Free all values allocated since MARK was obtained by value_mark
1435 (except for those released). */
1436 void
1437 value_free_to_mark (struct value *mark)
1438 {
1439 struct value *val;
1440 struct value *next;
1441
1442 for (val = all_values; val && val != mark; val = next)
1443 {
1444 next = val->next;
1445 val->released = 1;
1446 value_free (val);
1447 }
1448 all_values = val;
1449 }
1450
1451 /* Free all the values that have been allocated (except for those released).
1452 Call after each command, successful or not.
1453 In practice this is called before each command, which is sufficient. */
1454
1455 void
1456 free_all_values (void)
1457 {
1458 struct value *val;
1459 struct value *next;
1460
1461 for (val = all_values; val; val = next)
1462 {
1463 next = val->next;
1464 val->released = 1;
1465 value_free (val);
1466 }
1467
1468 all_values = 0;
1469 }
1470
1471 /* Frees all the elements in a chain of values. */
1472
1473 void
1474 free_value_chain (struct value *v)
1475 {
1476 struct value *next;
1477
1478 for (; v; v = next)
1479 {
1480 next = value_next (v);
1481 value_free (v);
1482 }
1483 }
1484
1485 /* Remove VAL from the chain all_values
1486 so it will not be freed automatically. */
1487
1488 void
1489 release_value (struct value *val)
1490 {
1491 struct value *v;
1492
1493 if (all_values == val)
1494 {
1495 all_values = val->next;
1496 val->next = NULL;
1497 val->released = 1;
1498 return;
1499 }
1500
1501 for (v = all_values; v; v = v->next)
1502 {
1503 if (v->next == val)
1504 {
1505 v->next = val->next;
1506 val->next = NULL;
1507 val->released = 1;
1508 break;
1509 }
1510 }
1511 }
1512
1513 /* If the value is not already released, release it.
1514 If the value is already released, increment its reference count.
1515 That is, this function ensures that the value is released from the
1516 value chain and that the caller owns a reference to it. */
1517
1518 void
1519 release_value_or_incref (struct value *val)
1520 {
1521 if (val->released)
1522 value_incref (val);
1523 else
1524 release_value (val);
1525 }
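/* Illustrative sketch: the ownership protocol for keeping a value
   alive past the current command.  TYPE is hypothetical.  */

static struct value *
keep_value_example (struct type *type)
{
  struct value *val = allocate_value (type);

  /* Take VAL off the all_values chain (or add a reference if it was
     already released); the caller now owns a reference and must
     eventually drop it with value_free.  */
  release_value_or_incref (val);

  return val;
}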
1526
1527 /* Release all values up to mark. */
1528 struct value *
1529 value_release_to_mark (struct value *mark)
1530 {
1531 struct value *val;
1532 struct value *next;
1533
1534 for (val = next = all_values; next; next = next->next)
1535 {
1536 if (next->next == mark)
1537 {
1538 all_values = next->next;
1539 next->next = NULL;
1540 return val;
1541 }
1542 next->released = 1;
1543 }
1544 all_values = 0;
1545 return val;
1546 }
1547
1548 /* Return a copy of the value ARG.
1549 It contains the same contents, for the same memory address,
1550 but it's a different block of storage. */
1551
1552 struct value *
1553 value_copy (struct value *arg)
1554 {
1555 struct type *encl_type = value_enclosing_type (arg);
1556 struct value *val;
1557
1558 if (value_lazy (arg))
1559 val = allocate_value_lazy (encl_type);
1560 else
1561 val = allocate_value (encl_type);
1562 val->type = arg->type;
1563 VALUE_LVAL (val) = VALUE_LVAL (arg);
1564 val->location = arg->location;
1565 val->offset = arg->offset;
1566 val->bitpos = arg->bitpos;
1567 val->bitsize = arg->bitsize;
1568 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1569 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1570 val->lazy = arg->lazy;
1571 val->optimized_out = arg->optimized_out;
1572 val->embedded_offset = value_embedded_offset (arg);
1573 val->pointed_to_offset = arg->pointed_to_offset;
1574 val->modifiable = arg->modifiable;
1575 if (!value_lazy (val))
1576 {
1577 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1578 TYPE_LENGTH (value_enclosing_type (arg)));
1579
1580 }
1581 val->unavailable = VEC_copy (range_s, arg->unavailable);
1582 set_value_parent (val, arg->parent);
1583 if (VALUE_LVAL (val) == lval_computed)
1584 {
1585 const struct lval_funcs *funcs = val->location.computed.funcs;
1586
1587 if (funcs->copy_closure)
1588 val->location.computed.closure = funcs->copy_closure (val);
1589 }
1590 return val;
1591 }
1592
1593 /* Return a version of ARG that is non-lvalue. */
1594
1595 struct value *
1596 value_non_lval (struct value *arg)
1597 {
1598 if (VALUE_LVAL (arg) != not_lval)
1599 {
1600 struct type *enc_type = value_enclosing_type (arg);
1601 struct value *val = allocate_value (enc_type);
1602
1603 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1604 TYPE_LENGTH (enc_type));
1605 val->type = arg->type;
1606 set_value_embedded_offset (val, value_embedded_offset (arg));
1607 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1608 return val;
1609 }
1610 return arg;
1611 }
1612
1613 void
1614 set_value_component_location (struct value *component,
1615 const struct value *whole)
1616 {
1617 gdb_assert (whole->lval != lval_xcallable);
1618
1619 if (whole->lval == lval_internalvar)
1620 VALUE_LVAL (component) = lval_internalvar_component;
1621 else
1622 VALUE_LVAL (component) = whole->lval;
1623
1624 component->location = whole->location;
1625 if (whole->lval == lval_computed)
1626 {
1627 const struct lval_funcs *funcs = whole->location.computed.funcs;
1628
1629 if (funcs->copy_closure)
1630 component->location.computed.closure = funcs->copy_closure (whole);
1631 }
1632 }
1633
1634 \f
1635 /* Access to the value history. */
1636
1637 /* Record a new value in the value history.
1638 Returns the absolute history index of the entry. */
1639
1640 int
1641 record_latest_value (struct value *val)
1642 {
1643 int i;
1644
1645 /* We don't want this value to have anything to do with the inferior anymore.
1646 In particular, "set $1 = 50" should not affect the variable from which
1647 the value was taken, and fast watchpoints should be able to assume that
1648 a value on the value history never changes. */
1649 if (value_lazy (val))
1650 value_fetch_lazy (val);
1651 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1652 from. This is a bit dubious, because then *&$1 does not just return $1
1653 but the current contents of that location. c'est la vie... */
1654 val->modifiable = 0;
1655
1656 /* The value may have already been released, in which case we're adding a
1657 new reference for its entry in the history. That is why we call
1658 release_value_or_incref here instead of release_value. */
1659 release_value_or_incref (val);
1660
1661 /* Here we treat value_history_count as origin-zero
1662 and as applying to the value being stored now. */
1663
1664 i = value_history_count % VALUE_HISTORY_CHUNK;
1665 if (i == 0)
1666 {
1667 struct value_history_chunk *new
1668 = (struct value_history_chunk *)
1669
1670 xmalloc (sizeof (struct value_history_chunk));
1671 memset (new->values, 0, sizeof new->values);
1672 new->next = value_history_chain;
1673 value_history_chain = new;
1674 }
1675
1676 value_history_chain->values[i] = val;
1677
1678 /* Now we regard value_history_count as origin-one
1679 and as applying to the value just stored. */
1680
1681 return ++value_history_count;
1682 }
1683
1684 /* Return a copy of the value in the history with sequence number NUM. */
1685
1686 struct value *
1687 access_value_history (int num)
1688 {
1689 struct value_history_chunk *chunk;
1690 int i;
1691 int absnum = num;
1692
1693 if (absnum <= 0)
1694 absnum += value_history_count;
1695
1696 if (absnum <= 0)
1697 {
1698 if (num == 0)
1699 error (_("The history is empty."));
1700 else if (num == 1)
1701 error (_("There is only one value in the history."));
1702 else
1703 error (_("History does not go back to $$%d."), -num);
1704 }
1705 if (absnum > value_history_count)
1706 error (_("History has not yet reached $%d."), absnum);
1707
1708 absnum--;
1709
1710 /* Now absnum is always absolute and origin zero. */
1711
1712 chunk = value_history_chain;
1713 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1714 - absnum / VALUE_HISTORY_CHUNK;
1715 i > 0; i--)
1716 chunk = chunk->next;
1717
1718 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1719 }
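/* Illustrative sketch of the chunk arithmetic above: with
   VALUE_HISTORY_CHUNK == 60 and value_history_count == 130, the chain
   holds $121..$130 (newest chunk), then $61..$120, then $1..$60.
   Fetching $75 gives absnum = 74, the loop walks
   (129 / 60) - (74 / 60) = 1 chunk back from the head, and the value
   sits in slot 74 % 60 = 14 of that chunk.  */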
1720
1721 static void
1722 show_values (char *num_exp, int from_tty)
1723 {
1724 int i;
1725 struct value *val;
1726 static int num = 1;
1727
1728 if (num_exp)
1729 {
1730 /* "show values +" should print from the stored position.
1731 "show values <exp>" should print around value number <exp>. */
1732 if (num_exp[0] != '+' || num_exp[1] != '\0')
1733 num = parse_and_eval_long (num_exp) - 5;
1734 }
1735 else
1736 {
1737 /* "show values" means print the last 10 values. */
1738 num = value_history_count - 9;
1739 }
1740
1741 if (num <= 0)
1742 num = 1;
1743
1744 for (i = num; i < num + 10 && i <= value_history_count; i++)
1745 {
1746 struct value_print_options opts;
1747
1748 val = access_value_history (i);
1749 printf_filtered (("$%d = "), i);
1750 get_user_print_options (&opts);
1751 value_print (val, gdb_stdout, &opts);
1752 printf_filtered (("\n"));
1753 }
1754
1755 /* The next "show values +" should start after what we just printed. */
1756 num += 10;
1757
1758 /* Hitting just return after this command should do the same thing as
1759 "show values +". If num_exp is null, this is unnecessary, since
1760 "show values +" is not useful after "show values". */
1761 if (from_tty && num_exp)
1762 {
1763 num_exp[0] = '+';
1764 num_exp[1] = '\0';
1765 }
1766 }
1767 \f
1768 /* Internal variables. These are variables within the debugger
1769 that hold values assigned by debugger commands.
1770 The user refers to them with a '$' prefix
1771 that does not appear in the variable names stored internally. */
1772
1773 struct internalvar
1774 {
1775 struct internalvar *next;
1776 char *name;
1777
1778 /* We support various different kinds of content of an internal variable.
1779 enum internalvar_kind specifies the kind, and union internalvar_data
1780 provides the data associated with this particular kind. */
1781
1782 enum internalvar_kind
1783 {
1784 /* The internal variable is empty. */
1785 INTERNALVAR_VOID,
1786
1787 /* The value of the internal variable is provided directly as
1788 a GDB value object. */
1789 INTERNALVAR_VALUE,
1790
1791 /* A fresh value is computed via a call-back routine on every
1792 access to the internal variable. */
1793 INTERNALVAR_MAKE_VALUE,
1794
1795 /* The internal variable holds a GDB internal convenience function. */
1796 INTERNALVAR_FUNCTION,
1797
1798 /* The variable holds an integer value. */
1799 INTERNALVAR_INTEGER,
1800
1801 /* The variable holds a GDB-provided string. */
1802 INTERNALVAR_STRING,
1803
1804 } kind;
1805
1806 union internalvar_data
1807 {
1808 /* A value object used with INTERNALVAR_VALUE. */
1809 struct value *value;
1810
1811 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1812 struct
1813 {
1814 /* The functions to call. */
1815 const struct internalvar_funcs *functions;
1816
1817 /* The function's user-data. */
1818 void *data;
1819 } make_value;
1820
1821 /* The internal function used with INTERNALVAR_FUNCTION. */
1822 struct
1823 {
1824 struct internal_function *function;
1825 /* True if this is the canonical name for the function. */
1826 int canonical;
1827 } fn;
1828
1829 /* An integer value used with INTERNALVAR_INTEGER. */
1830 struct
1831 {
1832 /* If type is non-NULL, it will be used as the type to generate
1833 a value for this internal variable. If type is NULL, a default
1834 integer type for the architecture is used. */
1835 struct type *type;
1836 LONGEST val;
1837 } integer;
1838
1839 /* A string value used with INTERNALVAR_STRING. */
1840 char *string;
1841 } u;
1842 };
1843
1844 static struct internalvar *internalvars;
1845
1846 /* If the variable does not already exist, create it and give it the
1847 value given. If no value is given then the default is zero. */
1848 static void
1849 init_if_undefined_command (char* args, int from_tty)
1850 {
1851 struct internalvar* intvar;
1852
1853 /* Parse the expression - this is taken from set_command(). */
1854 struct expression *expr = parse_expression (args);
1855 register struct cleanup *old_chain =
1856 make_cleanup (free_current_contents, &expr);
1857
1858 /* Validate the expression.
1859 Was the expression an assignment?
1860 Or even an expression at all? */
1861 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1862 error (_("Init-if-undefined requires an assignment expression."));
1863
1864 /* Extract the variable from the parsed expression.
1865 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1866 if (expr->elts[1].opcode != OP_INTERNALVAR)
1867 error (_("The first parameter to init-if-undefined "
1868 "should be a GDB variable."));
1869 intvar = expr->elts[2].internalvar;
1870
1871 /* Only evaluate the expression if the lvalue is void.
1872 This may still fail if the expression is invalid. */
1873 if (intvar->kind == INTERNALVAR_VOID)
1874 evaluate_expression (expr);
1875
1876 do_cleanups (old_chain);
1877 }
1878
1879
1880 /* Look up an internal variable with name NAME. NAME should not
1881 normally include a dollar sign.
1882
1883 If the specified internal variable does not exist,
1884 the return value is NULL. */
1885
1886 struct internalvar *
1887 lookup_only_internalvar (const char *name)
1888 {
1889 struct internalvar *var;
1890
1891 for (var = internalvars; var; var = var->next)
1892 if (strcmp (var->name, name) == 0)
1893 return var;
1894
1895 return NULL;
1896 }
1897
1898 /* Complete NAME by comparing it to the names of internal variables.
1899 Returns a vector of newly allocated strings, or NULL if no matches
1900 were found. */
1901
1902 VEC (char_ptr) *
1903 complete_internalvar (const char *name)
1904 {
1905 VEC (char_ptr) *result = NULL;
1906 struct internalvar *var;
1907 int len;
1908
1909 len = strlen (name);
1910
1911 for (var = internalvars; var; var = var->next)
1912 if (strncmp (var->name, name, len) == 0)
1913 {
1914 char *r = xstrdup (var->name);
1915
1916 VEC_safe_push (char_ptr, result, r);
1917 }
1918
1919 return result;
1920 }
1921
1922 /* Create an internal variable with name NAME and with a void value.
1923 NAME should not normally include a dollar sign. */
1924
1925 struct internalvar *
1926 create_internalvar (const char *name)
1927 {
1928 struct internalvar *var;
1929
1930 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1931 var->name = concat (name, (char *)NULL);
1932 var->kind = INTERNALVAR_VOID;
1933 var->next = internalvars;
1934 internalvars = var;
1935 return var;
1936 }
1937
1938 /* Create an internal variable with name NAME and register FUNCS as the
1939 call-backs that value_of_internalvar uses to create a value whenever
1940 this variable is referenced. NAME should not normally include a
1941 dollar sign. DATA is passed uninterpreted to the call-backs. The
1942 destroy call-back in FUNCS, if not NULL, is called when the internal
1943 variable is destroyed; it is passed DATA as its only argument. */
1944
1945 struct internalvar *
1946 create_internalvar_type_lazy (const char *name,
1947 const struct internalvar_funcs *funcs,
1948 void *data)
1949 {
1950 struct internalvar *var = create_internalvar (name);
1951
1952 var->kind = INTERNALVAR_MAKE_VALUE;
1953 var->u.make_value.functions = funcs;
1954 var->u.make_value.data = data;
1955 return var;
1956 }
1957
1958 /* See documentation in value.h. */
1959
1960 int
1961 compile_internalvar_to_ax (struct internalvar *var,
1962 struct agent_expr *expr,
1963 struct axs_value *value)
1964 {
1965 if (var->kind != INTERNALVAR_MAKE_VALUE
1966 || var->u.make_value.functions->compile_to_ax == NULL)
1967 return 0;
1968
1969 var->u.make_value.functions->compile_to_ax (var, expr, value,
1970 var->u.make_value.data);
1971 return 1;
1972 }
1973
1974 /* Look up an internal variable with name NAME. NAME should not
1975 normally include a dollar sign.
1976
1977 If the specified internal variable does not exist,
1978 one is created, with a void value. */
1979
1980 struct internalvar *
1981 lookup_internalvar (const char *name)
1982 {
1983 struct internalvar *var;
1984
1985 var = lookup_only_internalvar (name);
1986 if (var)
1987 return var;
1988
1989 return create_internalvar (name);
1990 }
1991
1992 /* Return current value of internal variable VAR. For variables that
1993 are not inherently typed, use a value type appropriate for GDBARCH. */
1994
1995 struct value *
1996 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
1997 {
1998 struct value *val;
1999 struct trace_state_variable *tsv;
2000
2001 /* If there is a trace state variable of the same name, assume that
2002 is what we really want to see. */
2003 tsv = find_trace_state_variable (var->name);
2004 if (tsv)
2005 {
2006 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2007 &(tsv->value));
2008 if (tsv->value_known)
2009 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2010 tsv->value);
2011 else
2012 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2013 return val;
2014 }
2015
2016 switch (var->kind)
2017 {
2018 case INTERNALVAR_VOID:
2019 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2020 break;
2021
2022 case INTERNALVAR_FUNCTION:
2023 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2024 break;
2025
2026 case INTERNALVAR_INTEGER:
2027 if (!var->u.integer.type)
2028 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2029 var->u.integer.val);
2030 else
2031 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2032 break;
2033
2034 case INTERNALVAR_STRING:
2035 val = value_cstring (var->u.string, strlen (var->u.string),
2036 builtin_type (gdbarch)->builtin_char);
2037 break;
2038
2039 case INTERNALVAR_VALUE:
2040 val = value_copy (var->u.value);
2041 if (value_lazy (val))
2042 value_fetch_lazy (val);
2043 break;
2044
2045 case INTERNALVAR_MAKE_VALUE:
2046 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2047 var->u.make_value.data);
2048 break;
2049
2050 default:
2051 internal_error (__FILE__, __LINE__, _("bad kind"));
2052 }
2053
2054 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2055 on this value go back to affect the original internal variable.
2056
2057 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2058 no underlying modifiable state in the internal variable.
2059
2060 Likewise, if the variable's value is a computed lvalue, we want
2061 references to it to produce another computed lvalue, where
2062 references and assignments actually operate through the
2063 computed value's functions.
2064
2065 This means that internal variables with computed values
2066 behave a little differently from other internal variables:
2067 assignments to them don't just replace the previous value
2068 altogether. At the moment, this seems like the behavior we
2069 want. */
2070
2071 if (var->kind != INTERNALVAR_MAKE_VALUE
2072 && val->lval != lval_computed)
2073 {
2074 VALUE_LVAL (val) = lval_internalvar;
2075 VALUE_INTERNALVAR (val) = var;
2076 }
2077
2078 return val;
2079 }
2080
2081 int
2082 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2083 {
2084 if (var->kind == INTERNALVAR_INTEGER)
2085 {
2086 *result = var->u.integer.val;
2087 return 1;
2088 }
2089
2090 if (var->kind == INTERNALVAR_VALUE)
2091 {
2092 struct type *type = check_typedef (value_type (var->u.value));
2093
2094 if (TYPE_CODE (type) == TYPE_CODE_INT)
2095 {
2096 *result = value_as_long (var->u.value);
2097 return 1;
2098 }
2099 }
2100
2101 return 0;
2102 }
2103
2104 static int
2105 get_internalvar_function (struct internalvar *var,
2106 struct internal_function **result)
2107 {
2108 switch (var->kind)
2109 {
2110 case INTERNALVAR_FUNCTION:
2111 *result = var->u.fn.function;
2112 return 1;
2113
2114 default:
2115 return 0;
2116 }
2117 }
2118
2119 void
2120 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
2121 int bitsize, struct value *newval)
2122 {
2123 gdb_byte *addr;
2124
2125 switch (var->kind)
2126 {
2127 case INTERNALVAR_VALUE:
2128 addr = value_contents_writeable (var->u.value);
2129
2130 if (bitsize)
2131 modify_field (value_type (var->u.value), addr + offset,
2132 value_as_long (newval), bitpos, bitsize);
2133 else
2134 memcpy (addr + offset, value_contents (newval),
2135 TYPE_LENGTH (value_type (newval)));
2136 break;
2137
2138 default:
2139 /* We can never get a component of any other kind. */
2140 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2141 }
2142 }
2143
2144 void
2145 set_internalvar (struct internalvar *var, struct value *val)
2146 {
2147 enum internalvar_kind new_kind;
2148 union internalvar_data new_data = { 0 };
2149
2150 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2151 error (_("Cannot overwrite convenience function %s"), var->name);
2152
2153 /* Prepare new contents. */
2154 switch (TYPE_CODE (check_typedef (value_type (val))))
2155 {
2156 case TYPE_CODE_VOID:
2157 new_kind = INTERNALVAR_VOID;
2158 break;
2159
2160 case TYPE_CODE_INTERNAL_FUNCTION:
2161 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2162 new_kind = INTERNALVAR_FUNCTION;
2163 get_internalvar_function (VALUE_INTERNALVAR (val),
2164 &new_data.fn.function);
2165 /* Copies created here are never canonical. */
2166 break;
2167
2168 default:
2169 new_kind = INTERNALVAR_VALUE;
2170 new_data.value = value_copy (val);
2171 new_data.value->modifiable = 1;
2172
2173 /* Force the value to be fetched from the target now, to avoid problems
2174 later when this internalvar is referenced and the target is gone or
2175 has changed. */
2176 if (value_lazy (new_data.value))
2177 value_fetch_lazy (new_data.value);
2178
2179 /* Release the value from the value chain to prevent it from being
2180 deleted by free_all_values. From here on this function should not
2181 call error () until new_data is installed into the var->u to avoid
2182 leaking memory. */
2183 release_value (new_data.value);
2184 break;
2185 }
2186
2187 /* Clean up old contents. */
2188 clear_internalvar (var);
2189
2190 /* Switch over. */
2191 var->kind = new_kind;
2192 var->u = new_data;
2193 /* End code which must not call error(). */
2194 }
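/* Rough sketch of a typical path here (not a literal call chain):
   assigning to a convenience variable, as in the CLI command
   "set $foo = 5", ends up calling set_internalvar on the internalvar
   for "foo" with the new value; the value is copied, fetched if lazy,
   and released from the value chain exactly as above.  */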
2195
2196 void
2197 set_internalvar_integer (struct internalvar *var, LONGEST l)
2198 {
2199 /* Clean up old contents. */
2200 clear_internalvar (var);
2201
2202 var->kind = INTERNALVAR_INTEGER;
2203 var->u.integer.type = NULL;
2204 var->u.integer.val = l;
2205 }
2206
2207 void
2208 set_internalvar_string (struct internalvar *var, const char *string)
2209 {
2210 /* Clean up old contents. */
2211 clear_internalvar (var);
2212
2213 var->kind = INTERNALVAR_STRING;
2214 var->u.string = xstrdup (string);
2215 }
2216
2217 static void
2218 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2219 {
2220 /* Clean up old contents. */
2221 clear_internalvar (var);
2222
2223 var->kind = INTERNALVAR_FUNCTION;
2224 var->u.fn.function = f;
2225 var->u.fn.canonical = 1;
2226 /* Variables installed here are always the canonical version. */
2227 }
2228
2229 void
2230 clear_internalvar (struct internalvar *var)
2231 {
2232 /* Clean up old contents. */
2233 switch (var->kind)
2234 {
2235 case INTERNALVAR_VALUE:
2236 value_free (var->u.value);
2237 break;
2238
2239 case INTERNALVAR_STRING:
2240 xfree (var->u.string);
2241 break;
2242
2243 case INTERNALVAR_MAKE_VALUE:
2244 if (var->u.make_value.functions->destroy != NULL)
2245 var->u.make_value.functions->destroy (var->u.make_value.data);
2246 break;
2247
2248 default:
2249 break;
2250 }
2251
2252 /* Reset to void kind. */
2253 var->kind = INTERNALVAR_VOID;
2254 }
2255
2256 char *
2257 internalvar_name (struct internalvar *var)
2258 {
2259 return var->name;
2260 }
2261
2262 static struct internal_function *
2263 create_internal_function (const char *name,
2264 internal_function_fn handler, void *cookie)
2265 {
2266 struct internal_function *ifn = XNEW (struct internal_function);
2267
2268 ifn->name = xstrdup (name);
2269 ifn->handler = handler;
2270 ifn->cookie = cookie;
2271 return ifn;
2272 }
2273
2274 char *
2275 value_internal_function_name (struct value *val)
2276 {
2277 struct internal_function *ifn;
2278 int result;
2279
2280 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2281 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2282 gdb_assert (result);
2283
2284 return ifn->name;
2285 }
2286
2287 struct value *
2288 call_internal_function (struct gdbarch *gdbarch,
2289 const struct language_defn *language,
2290 struct value *func, int argc, struct value **argv)
2291 {
2292 struct internal_function *ifn;
2293 int result;
2294
2295 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2296 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2297 gdb_assert (result);
2298
2299 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2300 }
2301
2302 /* The 'function' command. This does nothing -- it is just a
2303 placeholder to let "help function NAME" work. This is also used as
2304 the implementation of the sub-command that is created when
2305 registering an internal function. */
2306 static void
2307 function_command (char *command, int from_tty)
2308 {
2309 /* Do nothing. */
2310 }
2311
2312 /* Clean up if an internal function's command is destroyed. */
2313 static void
2314 function_destroyer (struct cmd_list_element *self, void *ignore)
2315 {
2316 xfree ((char *) self->name);
2317 xfree ((char *) self->doc);
2318 }
2319
2320 /* Add a new internal function. NAME is the name of the function; DOC
2321 is a documentation string describing the function. HANDLER is
2322 called when the function is invoked. COOKIE is an arbitrary
2323 pointer which is passed to HANDLER and is intended for "user
2324 data". */
2325 void
2326 add_internal_function (const char *name, const char *doc,
2327 internal_function_fn handler, void *cookie)
2328 {
2329 struct cmd_list_element *cmd;
2330 struct internal_function *ifn;
2331 struct internalvar *var = lookup_internalvar (name);
2332
2333 ifn = create_internal_function (name, handler, cookie);
2334 set_internalvar_function (var, ifn);
2335
2336 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2337 &functionlist);
2338 cmd->destroyer = function_destroyer;
2339 }
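/* For example, the $_isvoid convenience function implemented by
   isvoid_internal_fn near the end of this file is expected to be
   registered through this interface, roughly:
     add_internal_function ("_isvoid", "...", isvoid_internal_fn, NULL);
   which both installs the INTERNALVAR_FUNCTION and creates the
   "function _isvoid" help entry.  */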
2340
2341 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2342 prevent cycles / duplicates. */
2343
2344 void
2345 preserve_one_value (struct value *value, struct objfile *objfile,
2346 htab_t copied_types)
2347 {
2348 if (TYPE_OBJFILE (value->type) == objfile)
2349 value->type = copy_type_recursive (objfile, value->type, copied_types);
2350
2351 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2352 value->enclosing_type = copy_type_recursive (objfile,
2353 value->enclosing_type,
2354 copied_types);
2355 }
2356
2357 /* Likewise for internal variable VAR. */
2358
2359 static void
2360 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2361 htab_t copied_types)
2362 {
2363 switch (var->kind)
2364 {
2365 case INTERNALVAR_INTEGER:
2366 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2367 var->u.integer.type
2368 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2369 break;
2370
2371 case INTERNALVAR_VALUE:
2372 preserve_one_value (var->u.value, objfile, copied_types);
2373 break;
2374 }
2375 }
2376
2377 /* Update the internal variables and value history when OBJFILE is
2378 discarded; we must copy the types out of the objfile. New global types
2379 will be created for every convenience variable which currently points to
2380 this objfile's types, and the convenience variables will be adjusted to
2381 use the new global types. */
2382
2383 void
2384 preserve_values (struct objfile *objfile)
2385 {
2386 htab_t copied_types;
2387 struct value_history_chunk *cur;
2388 struct internalvar *var;
2389 int i;
2390
2391 /* Create the hash table. We allocate on the objfile's obstack, since
2392 it is soon to be deleted. */
2393 copied_types = create_copied_types_hash (objfile);
2394
2395 for (cur = value_history_chain; cur; cur = cur->next)
2396 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
2397 if (cur->values[i])
2398 preserve_one_value (cur->values[i], objfile, copied_types);
2399
2400 for (var = internalvars; var; var = var->next)
2401 preserve_one_internalvar (var, objfile, copied_types);
2402
2403 preserve_ext_lang_values (objfile, copied_types);
2404
2405 htab_delete (copied_types);
2406 }
2407
2408 static void
2409 show_convenience (char *ignore, int from_tty)
2410 {
2411 struct gdbarch *gdbarch = get_current_arch ();
2412 struct internalvar *var;
2413 int varseen = 0;
2414 struct value_print_options opts;
2415
2416 get_user_print_options (&opts);
2417 for (var = internalvars; var; var = var->next)
2418 {
2419 volatile struct gdb_exception ex;
2420
2421 if (!varseen)
2422 {
2423 varseen = 1;
2424 }
2425 printf_filtered (("$%s = "), var->name);
2426
2427 TRY_CATCH (ex, RETURN_MASK_ERROR)
2428 {
2429 struct value *val;
2430
2431 val = value_of_internalvar (gdbarch, var);
2432 value_print (val, gdb_stdout, &opts);
2433 }
2434 if (ex.reason < 0)
2435 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2436 printf_filtered (("\n"));
2437 }
2438 if (!varseen)
2439 {
2440 /* This text does not mention convenience functions on purpose.
2441 The user can't create them except via Python, and if Python support
2442 is installed this message will never be printed ($_streq will
2443 exist). */
2444 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2445 "Convenience variables have "
2446 "names starting with \"$\";\n"
2447 "use \"set\" as in \"set "
2448 "$foo = 5\" to define them.\n"));
2449 }
2450 }
2451 \f
2452 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2453
2454 struct value *
2455 value_of_xmethod (struct xmethod_worker *worker)
2456 {
2457 if (worker->value == NULL)
2458 {
2459 struct value *v;
2460
2461 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2462 v->lval = lval_xcallable;
2463 v->location.xm_worker = worker;
2464 v->modifiable = 0;
2465 worker->value = v;
2466 }
2467
2468 return worker->value;
2469 }
2470
2471 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2472
2473 struct value *
2474 call_xmethod (struct value *method, int argc, struct value **argv)
2475 {
2476 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2477 && method->lval == lval_xcallable && argc > 0);
2478
2479 return invoke_xmethod (method->location.xm_worker,
2480 argv[0], argv + 1, argc - 1);
2481 }
2482 \f
2483 /* Extract a value as a C number (either long or double).
2484 Knows how to convert fixed values to double, or
2485 floating values to long.
2486 Does not deallocate the value. */
2487
2488 LONGEST
2489 value_as_long (struct value *val)
2490 {
2491 /* This coerces arrays and functions, which is necessary (e.g.
2492 in disassemble_command). It also dereferences references, which
2493 I suspect is the most logical thing to do. */
2494 val = coerce_array (val);
2495 return unpack_long (value_type (val), value_contents (val));
2496 }
2497
2498 DOUBLEST
2499 value_as_double (struct value *val)
2500 {
2501 DOUBLEST foo;
2502 int inv;
2503
2504 foo = unpack_double (value_type (val), value_contents (val), &inv);
2505 if (inv)
2506 error (_("Invalid floating value found in program."));
2507 return foo;
2508 }
2509
2510 /* Extract a value as a C pointer. Does not deallocate the value.
2511 Note that val's type may not actually be a pointer; value_as_long
2512 handles all the cases. */
2513 CORE_ADDR
2514 value_as_address (struct value *val)
2515 {
2516 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2517
2518 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2519 whether we want this to be true eventually. */
2520 #if 0
2521 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2522 non-address (e.g. argument to "signal", "info break", etc.), or
2523 for pointers to char, in which the low bits *are* significant. */
2524 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2525 #else
2526
2527 /* There are several targets (IA-64, PowerPC, and others) which
2528 don't represent pointers to functions as simply the address of
2529 the function's entry point. For example, on the IA-64, a
2530 function pointer points to a two-word descriptor, generated by
2531 the linker, which contains the function's entry point, and the
2532 value the IA-64 "global pointer" register should have --- to
2533 support position-independent code. The linker generates
2534 descriptors only for those functions whose addresses are taken.
2535
2536 On such targets, it's difficult for GDB to convert an arbitrary
2537 function address into a function pointer; it has to either find
2538 an existing descriptor for that function, or call malloc and
2539 build its own. On some targets, it is impossible for GDB to
2540 build a descriptor at all: the descriptor must contain a jump
2541 instruction; data memory cannot be executed; and code memory
2542 cannot be modified.
2543
2544 Upon entry to this function, if VAL is a value of type `function'
2545 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2546 value_address (val) is the address of the function. This is what
2547 you'll get if you evaluate an expression like `main'. The call
2548 to COERCE_ARRAY below actually does all the usual unary
2549 conversions, which includes converting values of type `function'
2550 to `pointer to function'. This is the challenging conversion
2551 discussed above. Then, `unpack_long' will convert that pointer
2552 back into an address.
2553
2554 So, suppose the user types `disassemble foo' on an architecture
2555 with a strange function pointer representation, on which GDB
2556 cannot build its own descriptors, and suppose further that `foo'
2557 has no linker-built descriptor. The address->pointer conversion
2558 will signal an error and prevent the command from running, even
2559 though the next step would have been to convert the pointer
2560 directly back into the same address.
2561
2562 The following shortcut avoids this whole mess. If VAL is a
2563 function, just return its address directly. */
2564 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2565 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2566 return value_address (val);
2567
2568 val = coerce_array (val);
2569
2570 /* Some architectures (e.g. Harvard) map instruction and data
2571 addresses onto a single large unified address space. For
2572 instance: an architecture may consider a large integer in the
2573 range 0x10000000 .. 0x1000ffff to already represent a data
2574 address (and hence not need a pointer-to-address conversion),
2575 while a small integer would still need the full integer to
2576 pointer to address conversion. Just assume such architectures
2577 handle all integer conversions in a single function. */
2578
2579 /* JimB writes:
2580
2581 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2582 must admonish GDB hackers to make sure its behavior matches the
2583 compiler's, whenever possible.
2584
2585 In general, I think GDB should evaluate expressions the same way
2586 the compiler does. When the user copies an expression out of
2587 their source code and hands it to a `print' command, they should
2588 get the same value the compiler would have computed. Any
2589 deviation from this rule can cause major confusion and annoyance,
2590 and needs to be justified carefully. In other words, GDB doesn't
2591 really have the freedom to do these conversions in clever and
2592 useful ways.
2593
2594 AndrewC pointed out that users aren't complaining about how GDB
2595 casts integers to pointers; they are complaining that they can't
2596 take an address from a disassembly listing and give it to `x/i'.
2597 This is certainly important.
2598
2599 Adding an architecture method like integer_to_address() certainly
2600 makes it possible for GDB to "get it right" in all circumstances
2601 --- the target has complete control over how things get done, so
2602 people can Do The Right Thing for their target without breaking
2603 anyone else. The standard doesn't specify how integers get
2604 converted to pointers; usually, the ABI doesn't either, but
2605 ABI-specific code is a more reasonable place to handle it. */
2606
2607 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2608 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2609 && gdbarch_integer_to_address_p (gdbarch))
2610 return gdbarch_integer_to_address (gdbarch, value_type (val),
2611 value_contents (val));
2612
2613 return unpack_long (value_type (val), value_contents (val));
2614 #endif
2615 }
2616 \f
2617 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2618 as a long, or as a double, assuming the raw data is described
2619 by type TYPE. Knows how to convert different sizes of values
2620 and can convert between fixed and floating point. We don't assume
2621 any alignment for the raw data. Return value is in host byte order.
2622
2623 If you want functions and arrays to be coerced to pointers, and
2624 references to be dereferenced, call value_as_long() instead.
2625
2626 C++: It is assumed that the front-end has taken care of
2627 all matters concerning pointers to members. A pointer
2628 to member which reaches here is considered to be equivalent
2629 to an INT (or some size). After all, it is only an offset. */
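/* Example: for a 4-byte unsigned TYPE_CODE_INT whose target bytes at
   VALADDR are 0x12 0x34 0x56 0x78, unpack_long returns 0x12345678 on
   a big-endian target and 0x78563412 on a little-endian one.  */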
2630
2631 LONGEST
2632 unpack_long (struct type *type, const gdb_byte *valaddr)
2633 {
2634 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2635 enum type_code code = TYPE_CODE (type);
2636 int len = TYPE_LENGTH (type);
2637 int nosign = TYPE_UNSIGNED (type);
2638
2639 switch (code)
2640 {
2641 case TYPE_CODE_TYPEDEF:
2642 return unpack_long (check_typedef (type), valaddr);
2643 case TYPE_CODE_ENUM:
2644 case TYPE_CODE_FLAGS:
2645 case TYPE_CODE_BOOL:
2646 case TYPE_CODE_INT:
2647 case TYPE_CODE_CHAR:
2648 case TYPE_CODE_RANGE:
2649 case TYPE_CODE_MEMBERPTR:
2650 if (nosign)
2651 return extract_unsigned_integer (valaddr, len, byte_order);
2652 else
2653 return extract_signed_integer (valaddr, len, byte_order);
2654
2655 case TYPE_CODE_FLT:
2656 return extract_typed_floating (valaddr, type);
2657
2658 case TYPE_CODE_DECFLOAT:
2659 /* libdecnumber has a function to convert from decimal to integer, but
2660 it doesn't work when the decimal number has a fractional part. */
2661 return decimal_to_doublest (valaddr, len, byte_order);
2662
2663 case TYPE_CODE_PTR:
2664 case TYPE_CODE_REF:
2665 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2666 whether we want this to be true eventually. */
2667 return extract_typed_address (valaddr, type);
2668
2669 default:
2670 error (_("Value can't be converted to integer."));
2671 }
2672 return 0; /* Placate lint. */
2673 }
2674
2675 /* Return a double value from the specified type and address.
2676 INVP points to an int which is set to 0 for valid value,
2677 1 for invalid value (bad float format). In either case,
2678 the returned double is OK to use. Argument is in target
2679 format, result is in host format. */
2680
2681 DOUBLEST
2682 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2683 {
2684 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2685 enum type_code code;
2686 int len;
2687 int nosign;
2688
2689 *invp = 0; /* Assume valid. */
2690 CHECK_TYPEDEF (type);
2691 code = TYPE_CODE (type);
2692 len = TYPE_LENGTH (type);
2693 nosign = TYPE_UNSIGNED (type);
2694 if (code == TYPE_CODE_FLT)
2695 {
2696 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2697 floating-point value was valid (using the macro
2698 INVALID_FLOAT). That test/macro have been removed.
2699
2700 It turns out that only the VAX defined this macro and then
2701 only in a non-portable way. Fixing the portability problem
2702 wouldn't help since the VAX floating-point code is also badly
2703 bit-rotten. The target needs to add definitions for the
2704 methods gdbarch_float_format and gdbarch_double_format - these
2705 exactly describe the target floating-point format. The
2706 problem here is that the corresponding floatformat_vax_f and
2707 floatformat_vax_d values these methods should be set to are
2708 not defined either. Oops!
2709
2710 Hopefully someone will add both the missing floatformat
2711 definitions and the new cases for floatformat_is_valid (). */
2712
2713 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2714 {
2715 *invp = 1;
2716 return 0.0;
2717 }
2718
2719 return extract_typed_floating (valaddr, type);
2720 }
2721 else if (code == TYPE_CODE_DECFLOAT)
2722 return decimal_to_doublest (valaddr, len, byte_order);
2723 else if (nosign)
2724 {
2725 /* Unsigned -- be sure we compensate for signed LONGEST. */
2726 return (ULONGEST) unpack_long (type, valaddr);
2727 }
2728 else
2729 {
2730 /* Signed -- we are OK with unpack_long. */
2731 return unpack_long (type, valaddr);
2732 }
2733 }
2734
2735 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2736 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2737 We don't assume any alignment for the raw data. Return value is in
2738 host byte order.
2739
2740 If you want functions and arrays to be coerced to pointers, and
2741 references to be dereferenced, call value_as_address() instead.
2742
2743 C++: It is assumed that the front-end has taken care of
2744 all matters concerning pointers to members. A pointer
2745 to member which reaches here is considered to be equivalent
2746 to an INT (or some size). After all, it is only an offset. */
2747
2748 CORE_ADDR
2749 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2750 {
2751 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2752 whether we want this to be true eventually. */
2753 return unpack_long (type, valaddr);
2754 }
2755
2756 \f
2757 /* Get the value of the FIELDNO'th field (which must be static) of
2758 TYPE. */
2759
2760 struct value *
2761 value_static_field (struct type *type, int fieldno)
2762 {
2763 struct value *retval;
2764
2765 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2766 {
2767 case FIELD_LOC_KIND_PHYSADDR:
2768 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2769 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2770 break;
2771 case FIELD_LOC_KIND_PHYSNAME:
2772 {
2773 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2774 /* TYPE_FIELD_NAME (type, fieldno); */
2775 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2776
2777 if (sym == NULL)
2778 {
2779 /* With some compilers, e.g. HP aCC, static data members are
2780 reported as non-debuggable symbols. */
2781 struct bound_minimal_symbol msym
2782 = lookup_minimal_symbol (phys_name, NULL, NULL);
2783
2784 if (!msym.minsym)
2785 return allocate_optimized_out_value (type);
2786 else
2787 {
2788 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2789 BMSYMBOL_VALUE_ADDRESS (msym));
2790 }
2791 }
2792 else
2793 retval = value_of_variable (sym, NULL);
2794 break;
2795 }
2796 default:
2797 gdb_assert_not_reached ("unexpected field location kind");
2798 }
2799
2800 return retval;
2801 }
2802
2803 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2804 You have to be careful here, since the size of the data area for the value
2805 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2806 than the old enclosing type, you have to allocate more space for the
2807 data. */
2808
2809 void
2810 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2811 {
2812 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2813 val->contents =
2814 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2815
2816 val->enclosing_type = new_encl_type;
2817 }
2818
2819 /* Given a value ARG1 (offset by OFFSET bytes)
2820 of a struct or union type ARG_TYPE,
2821 extract and return the value of one of its (non-static) fields.
2822 FIELDNO says which field. */
2823
2824 struct value *
2825 value_primitive_field (struct value *arg1, int offset,
2826 int fieldno, struct type *arg_type)
2827 {
2828 struct value *v;
2829 struct type *type;
2830
2831 CHECK_TYPEDEF (arg_type);
2832 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2833
2834 /* Call check_typedef on our type to make sure that, if TYPE
2835 is a TYPE_CODE_TYPEDEF, its length is set to the length
2836 of the target type instead of zero. However, we do not
2837 replace the typedef type by the target type, because we want
2838 to keep the typedef in order to be able to print the type
2839 description correctly. */
2840 check_typedef (type);
2841
2842 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2843 {
2844 /* Handle packed fields.
2845
2846 Create a new value for the bitfield, with bitpos and bitsize
2847 set. If possible, arrange offset and bitpos so that we can
2848 do a single aligned read of the size of the containing type.
2849 Otherwise, adjust offset to the byte containing the first
2850 bit. Assume that the address, offset, and embedded offset
2851 are sufficiently aligned. */
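/* Worked example: for a bitfield at bitpos 37 with bitsize 3 whose
   field type is a 4-byte int (container_bitsize == 32), the test
   below succeeds (37 % 32 + 3 <= 32), so v->bitpos becomes 5 and
   the offset is advanced by (37 - 5) / 8 == 4 bytes, allowing one
   aligned read of the containing int.  */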
2852
2853 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2854 int container_bitsize = TYPE_LENGTH (type) * 8;
2855
2856 if (arg1->optimized_out)
2857 v = allocate_optimized_out_value (type);
2858 else
2859 {
2860 v = allocate_value_lazy (type);
2861 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2862 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2863 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2864 v->bitpos = bitpos % container_bitsize;
2865 else
2866 v->bitpos = bitpos % 8;
2867 v->offset = (value_embedded_offset (arg1)
2868 + offset
2869 + (bitpos - v->bitpos) / 8);
2870 set_value_parent (v, arg1);
2871 if (!value_lazy (arg1))
2872 value_fetch_lazy (v);
2873 }
2874 }
2875 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2876 {
2877 /* This field is actually a base subobject, so preserve the
2878 entire object's contents for later references to virtual
2879 bases, etc. */
2880 int boffset;
2881
2882 /* Lazy register values with offsets are not supported. */
2883 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2884 value_fetch_lazy (arg1);
2885
2886 /* The optimized_out flag is only set correctly once a lazy value is
2887 loaded; having just loaded some lazy values, we should check the
2888 optimized-out case now. */
2889 if (arg1->optimized_out)
2890 v = allocate_optimized_out_value (type);
2891 else
2892 {
2893 /* We special case virtual inheritance here because this
2894 requires access to the contents, which we would rather avoid
2895 for references to ordinary fields of unavailable values. */
2896 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2897 boffset = baseclass_offset (arg_type, fieldno,
2898 value_contents (arg1),
2899 value_embedded_offset (arg1),
2900 value_address (arg1),
2901 arg1);
2902 else
2903 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2904
2905 if (value_lazy (arg1))
2906 v = allocate_value_lazy (value_enclosing_type (arg1));
2907 else
2908 {
2909 v = allocate_value (value_enclosing_type (arg1));
2910 value_contents_copy_raw (v, 0, arg1, 0,
2911 TYPE_LENGTH (value_enclosing_type (arg1)));
2912 }
2913 v->type = type;
2914 v->offset = value_offset (arg1);
2915 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2916 }
2917 }
2918 else
2919 {
2920 /* Plain old data member. */
2921 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2922
2923 /* Lazy register values with offsets are not supported. */
2924 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2925 value_fetch_lazy (arg1);
2926
2927 /* The optimized_out flag is only set correctly once a lazy value is
2928 loaded; having just loaded some lazy values, we should check for
2929 the optimized-out case now. */
2930 if (arg1->optimized_out)
2931 v = allocate_optimized_out_value (type);
2932 else if (value_lazy (arg1))
2933 v = allocate_value_lazy (type);
2934 else
2935 {
2936 v = allocate_value (type);
2937 value_contents_copy_raw (v, value_embedded_offset (v),
2938 arg1, value_embedded_offset (arg1) + offset,
2939 TYPE_LENGTH (type));
2940 }
2941 v->offset = (value_offset (arg1) + offset
2942 + value_embedded_offset (arg1));
2943 }
2944 set_value_component_location (v, arg1);
2945 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
2946 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
2947 return v;
2948 }
2949
2950 /* Given a value ARG1 of a struct or union type,
2951 extract and return the value of one of its (non-static) fields.
2952 FIELDNO says which field. */
2953
2954 struct value *
2955 value_field (struct value *arg1, int fieldno)
2956 {
2957 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
2958 }
2959
2960 /* Return a non-virtual function as a value.
2961 F is the list of member functions which contains the desired method.
2962 J is an index into F which provides the desired method.
2963
2964 We only use the symbol for its address, so be happy with either a
2965 full symbol or a minimal symbol. */
2966
2967 struct value *
2968 value_fn_field (struct value **arg1p, struct fn_field *f,
2969 int j, struct type *type,
2970 int offset)
2971 {
2972 struct value *v;
2973 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
2974 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
2975 struct symbol *sym;
2976 struct bound_minimal_symbol msym;
2977
2978 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
2979 if (sym != NULL)
2980 {
2981 memset (&msym, 0, sizeof (msym));
2982 }
2983 else
2984 {
2985 gdb_assert (sym == NULL);
2986 msym = lookup_bound_minimal_symbol (physname);
2987 if (msym.minsym == NULL)
2988 return NULL;
2989 }
2990
2991 v = allocate_value (ftype);
2992 if (sym)
2993 {
2994 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
2995 }
2996 else
2997 {
2998 /* The minimal symbol might point to a function descriptor;
2999 resolve it to the actual code address instead. */
3000 struct objfile *objfile = msym.objfile;
3001 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3002
3003 set_value_address (v,
3004 gdbarch_convert_from_func_ptr_addr
3005 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3006 }
3007
3008 if (arg1p)
3009 {
3010 if (type != value_type (*arg1p))
3011 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3012 value_addr (*arg1p)));
3013
3014 /* Move the `this' pointer according to the offset.
3015 VALUE_OFFSET (*arg1p) += offset; */
3016 }
3017
3018 return v;
3019 }
3020
3021 \f
3022
3023 /* Helper function for both unpack_value_bits_as_long and
3024 unpack_bits_as_long. See those functions for more details on the
3025 interface; the only difference is that this function accepts either
3026 a NULL or a non-NULL ORIGINAL_VALUE. */
3027
3028 static int
3029 unpack_value_bits_as_long_1 (struct type *field_type, const gdb_byte *valaddr,
3030 int embedded_offset, int bitpos, int bitsize,
3031 const struct value *original_value,
3032 LONGEST *result)
3033 {
3034 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3035 ULONGEST val;
3036 ULONGEST valmask;
3037 int lsbcount;
3038 int bytes_read;
3039 int read_offset;
3040
3041 /* Read the minimum number of bytes required; there may not be
3042 enough bytes to read an entire ULONGEST. */
3043 CHECK_TYPEDEF (field_type);
3044 if (bitsize)
3045 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3046 else
3047 bytes_read = TYPE_LENGTH (field_type);
3048
3049 read_offset = bitpos / 8;
3050
3051 if (original_value != NULL
3052 && !value_bits_available (original_value, embedded_offset + bitpos,
3053 bitsize))
3054 return 0;
3055
3056 val = extract_unsigned_integer (valaddr + embedded_offset + read_offset,
3057 bytes_read, byte_order);
3058
3059 /* Extract bits. See comment above. */
3060
3061 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3062 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3063 else
3064 lsbcount = (bitpos % 8);
3065 val >>= lsbcount;
3066
3067 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3068 If the field is signed, and is negative, then sign extend. */
3069
3070 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3071 {
3072 valmask = (((ULONGEST) 1) << bitsize) - 1;
3073 val &= valmask;
3074 if (!TYPE_UNSIGNED (field_type))
3075 {
3076 if (val & (valmask ^ (valmask >> 1)))
3077 {
3078 val |= ~valmask;
3079 }
3080 }
3081 }
3082
3083 *result = val;
3084 return 1;
3085 }
3086
3087 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3088 VALADDR + EMBEDDED_OFFSET, and store the result in *RESULT.
3089 VALADDR points to the contents of ORIGINAL_VALUE, which must not be
3090 NULL. The bitfield starts at BITPOS bits and contains BITSIZE
3091 bits.
3092
3093 Returns false if the value contents are unavailable, otherwise
3094 returns true, indicating a valid value has been stored in *RESULT.
3095
3096 Extracting bits depends on endianness of the machine. Compute the
3097 number of least significant bits to discard. For big endian machines,
3098 we compute the total number of bits in the anonymous object, subtract
3099 off the bit count from the MSB of the object to the MSB of the
3100 bitfield, then the size of the bitfield, which leaves the LSB discard
3101 count. For little endian machines, the discard count is simply the
3102 number of bits from the LSB of the anonymous object to the LSB of the
3103 bitfield.
3104
3105 If the field is signed, we also do sign extension. */
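/* Numeric example of the discard count: for BITPOS == 2 and
   BITSIZE == 3 the helper reads (2 + 3 + 7) / 8 == 1 byte, so on a
   bits-big-endian machine LSBCOUNT is 8 * 1 - 2 - 3 == 3, while on a
   bits-little-endian machine it is simply 2; the shifted value is
   then masked to 3 bits and sign-extended if FIELD_TYPE is signed.  */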
3106
3107 int
3108 unpack_value_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3109 int embedded_offset, int bitpos, int bitsize,
3110 const struct value *original_value,
3111 LONGEST *result)
3112 {
3113 gdb_assert (original_value != NULL);
3114
3115 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3116 bitpos, bitsize, original_value, result);
3117
3118 }
3119
3120 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3121 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3122 ORIGINAL_VALUE. See unpack_value_bits_as_long for more
3123 details. */
3124
3125 static int
3126 unpack_value_field_as_long_1 (struct type *type, const gdb_byte *valaddr,
3127 int embedded_offset, int fieldno,
3128 const struct value *val, LONGEST *result)
3129 {
3130 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3131 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3132 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3133
3134 return unpack_value_bits_as_long_1 (field_type, valaddr, embedded_offset,
3135 bitpos, bitsize, val,
3136 result);
3137 }
3138
3139 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3140 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3141 ORIGINAL_VALUE, which must not be NULL. See
3142 unpack_value_bits_as_long for more details. */
3143
3144 int
3145 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3146 int embedded_offset, int fieldno,
3147 const struct value *val, LONGEST *result)
3148 {
3149 gdb_assert (val != NULL);
3150
3151 return unpack_value_field_as_long_1 (type, valaddr, embedded_offset,
3152 fieldno, val, result);
3153 }
3154
3155 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3156 object at VALADDR. See unpack_value_bits_as_long for more details.
3157 This function differs from unpack_value_field_as_long in that it
3158 operates without a struct value object. */
3159
3160 LONGEST
3161 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3162 {
3163 LONGEST result;
3164
3165 unpack_value_field_as_long_1 (type, valaddr, 0, fieldno, NULL, &result);
3166 return result;
3167 }
3168
3169 /* Return a new value with type TYPE, which is the FIELDNO'th field of
3170 the object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the
3171 contents of VAL. If the parts of VAL's contents required to extract
3172 the bitfield are unavailable, the new value is correspondingly
3173 marked as unavailable. */
3174
3175 struct value *
3176 value_field_bitfield (struct type *type, int fieldno,
3177 const gdb_byte *valaddr,
3178 int embedded_offset, const struct value *val)
3179 {
3180 LONGEST l;
3181
3182 if (!unpack_value_field_as_long (type, valaddr, embedded_offset, fieldno,
3183 val, &l))
3184 {
3185 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3186 struct value *retval = allocate_value (field_type);
3187 mark_value_bytes_unavailable (retval, 0, TYPE_LENGTH (field_type));
3188 return retval;
3189 }
3190 else
3191 {
3192 return value_from_longest (TYPE_FIELD_TYPE (type, fieldno), l);
3193 }
3194 }
3195
3196 /* Modify the value of a bitfield. ADDR points to a block of memory in
3197 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3198 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3199 indicate which bits (in target bit order) comprise the bitfield.
3200 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3201 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
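/* Worked example (bits_big_endian == 0): with BITPOS == 11 and
   BITSIZE == 5, ADDR is advanced by one byte and BITPOS becomes 3;
   MASK is 0x1f and BYTESIZE is (3 + 5 + 7) / 8 == 1, so the field
   occupies bits 3..7 of that single byte.  On a bits-big-endian
   target BITPOS is instead recomputed as 8 * 1 - 3 - 5 == 0.  */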
3202
3203 void
3204 modify_field (struct type *type, gdb_byte *addr,
3205 LONGEST fieldval, int bitpos, int bitsize)
3206 {
3207 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3208 ULONGEST oword;
3209 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3210 int bytesize;
3211
3212 /* Normalize BITPOS. */
3213 addr += bitpos / 8;
3214 bitpos %= 8;
3215
3216 /* If a negative fieldval fits in the field in question, chop
3217 off the sign extension bits. */
3218 if ((~fieldval & ~(mask >> 1)) == 0)
3219 fieldval &= mask;
3220
3221 /* Warn if value is too big to fit in the field in question. */
3222 if (0 != (fieldval & ~mask))
3223 {
3224 /* FIXME: would like to include fieldval in the message, but
3225 we don't have a sprintf_longest. */
3226 warning (_("Value does not fit in %d bits."), bitsize);
3227
3228 /* Truncate it, otherwise adjoining fields may be corrupted. */
3229 fieldval &= mask;
3230 }
3231
3232 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3233 false valgrind reports. */
3234
3235 bytesize = (bitpos + bitsize + 7) / 8;
3236 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3237
3238 /* Shifting for bit field depends on endianness of the target machine. */
3239 if (gdbarch_bits_big_endian (get_type_arch (type)))
3240 bitpos = bytesize * 8 - bitpos - bitsize;
3241
3242 oword &= ~(mask << bitpos);
3243 oword |= fieldval << bitpos;
3244
3245 store_unsigned_integer (addr, bytesize, byte_order, oword);
3246 }
3247 \f
3248 /* Pack NUM into BUF using a target format of TYPE. */
3249
3250 void
3251 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3252 {
3253 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3254 int len;
3255
3256 type = check_typedef (type);
3257 len = TYPE_LENGTH (type);
3258
3259 switch (TYPE_CODE (type))
3260 {
3261 case TYPE_CODE_INT:
3262 case TYPE_CODE_CHAR:
3263 case TYPE_CODE_ENUM:
3264 case TYPE_CODE_FLAGS:
3265 case TYPE_CODE_BOOL:
3266 case TYPE_CODE_RANGE:
3267 case TYPE_CODE_MEMBERPTR:
3268 store_signed_integer (buf, len, byte_order, num);
3269 break;
3270
3271 case TYPE_CODE_REF:
3272 case TYPE_CODE_PTR:
3273 store_typed_address (buf, type, (CORE_ADDR) num);
3274 break;
3275
3276 default:
3277 error (_("Unexpected type (%d) encountered for integer constant."),
3278 TYPE_CODE (type));
3279 }
3280 }
3281
3282
3283 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3284
3285 static void
3286 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3287 {
3288 int len;
3289 enum bfd_endian byte_order;
3290
3291 type = check_typedef (type);
3292 len = TYPE_LENGTH (type);
3293 byte_order = gdbarch_byte_order (get_type_arch (type));
3294
3295 switch (TYPE_CODE (type))
3296 {
3297 case TYPE_CODE_INT:
3298 case TYPE_CODE_CHAR:
3299 case TYPE_CODE_ENUM:
3300 case TYPE_CODE_FLAGS:
3301 case TYPE_CODE_BOOL:
3302 case TYPE_CODE_RANGE:
3303 case TYPE_CODE_MEMBERPTR:
3304 store_unsigned_integer (buf, len, byte_order, num);
3305 break;
3306
3307 case TYPE_CODE_REF:
3308 case TYPE_CODE_PTR:
3309 store_typed_address (buf, type, (CORE_ADDR) num);
3310 break;
3311
3312 default:
3313 error (_("Unexpected type (%d) encountered "
3314 "for unsigned integer constant."),
3315 TYPE_CODE (type));
3316 }
3317 }
3318
3319
3320 /* Convert C numbers into newly allocated values. */
3321
3322 struct value *
3323 value_from_longest (struct type *type, LONGEST num)
3324 {
3325 struct value *val = allocate_value (type);
3326
3327 pack_long (value_contents_raw (val), type, num);
3328 return val;
3329 }
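/* Typical use, as seen later in this file:
     value_from_longest (builtin_type (gdbarch)->builtin_int, 1);
   allocates an int-typed value and packs the number into its contents
   in target format via pack_long.  */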
3330
3331
3332 /* Convert C unsigned numbers into newly allocated values. */
3333
3334 struct value *
3335 value_from_ulongest (struct type *type, ULONGEST num)
3336 {
3337 struct value *val = allocate_value (type);
3338
3339 pack_unsigned_long (value_contents_raw (val), type, num);
3340
3341 return val;
3342 }
3343
3344
3345 /* Create a value representing a pointer of type TYPE to the address
3346 ADDR. The type of the created value may differ from the passed
3347 type TYPE. Make sure to retrieve the returned value's new type
3348 after this call, e.g. in case of a variable-length array. */
3349
3350 struct value *
3351 value_from_pointer (struct type *type, CORE_ADDR addr)
3352 {
3353 struct type *resolved_type = resolve_dynamic_type (type, addr);
3354 struct value *val = allocate_value (resolved_type);
3355
3356 store_typed_address (value_contents_raw (val),
3357 check_typedef (resolved_type), addr);
3358 return val;
3359 }
3360
3361
3362 /* Create a value of type TYPE whose contents come from VALADDR, if it
3363 is non-null, and whose memory address (in the inferior) is
3364 ADDRESS. The type of the created value may differ from the passed
3365 type TYPE. Make sure to retrieve the value's new type after this call.
3366 Note that TYPE is not passed through resolve_dynamic_type; this is
3367 a special API intended for use only by Ada. */
3368
3369 struct value *
3370 value_from_contents_and_address_unresolved (struct type *type,
3371 const gdb_byte *valaddr,
3372 CORE_ADDR address)
3373 {
3374 struct value *v;
3375
3376 if (valaddr == NULL)
3377 v = allocate_value_lazy (type);
3378 else
3379 v = value_from_contents (type, valaddr);
3380 set_value_address (v, address);
3381 VALUE_LVAL (v) = lval_memory;
3382 return v;
3383 }
3384
3385 /* Create a value of type TYPE whose contents come from VALADDR, if it
3386 is non-null, and whose memory address (in the inferior) is
3387 ADDRESS. The type of the created value may differ from the passed
3388 type TYPE. Make sure to retrieve the value's new type after this call. */
3389
3390 struct value *
3391 value_from_contents_and_address (struct type *type,
3392 const gdb_byte *valaddr,
3393 CORE_ADDR address)
3394 {
3395 struct type *resolved_type = resolve_dynamic_type (type, address);
3396 struct value *v;
3397
3398 if (valaddr == NULL)
3399 v = allocate_value_lazy (resolved_type);
3400 else
3401 v = value_from_contents (resolved_type, valaddr);
3402 set_value_address (v, address);
3403 VALUE_LVAL (v) = lval_memory;
3404 return v;
3405 }
3406
3407 /* Create a value of type TYPE holding the contents CONTENTS.
3408 The new value is `not_lval'. */
3409
3410 struct value *
3411 value_from_contents (struct type *type, const gdb_byte *contents)
3412 {
3413 struct value *result;
3414
3415 result = allocate_value (type);
3416 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3417 return result;
3418 }
3419
3420 struct value *
3421 value_from_double (struct type *type, DOUBLEST num)
3422 {
3423 struct value *val = allocate_value (type);
3424 struct type *base_type = check_typedef (type);
3425 enum type_code code = TYPE_CODE (base_type);
3426
3427 if (code == TYPE_CODE_FLT)
3428 {
3429 store_typed_floating (value_contents_raw (val), base_type, num);
3430 }
3431 else
3432 error (_("Unexpected type encountered for floating constant."));
3433
3434 return val;
3435 }
3436
3437 struct value *
3438 value_from_decfloat (struct type *type, const gdb_byte *dec)
3439 {
3440 struct value *val = allocate_value (type);
3441
3442 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3443 return val;
3444 }
3445
3446 /* Extract a value from the value history. Input will be of the form
3447 $digits or $$digits. See block comment above 'write_dollar_variable'
3448 for details. */
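/* Examples, per the parsing below: "$" and "$0" yield index 0 (the
   last history entry), "$5" yields index 5, "$$" yields index -1 (the
   entry before the last), and "$$3" yields index -3; the index is
   then passed to access_value_history.  */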
3449
3450 struct value *
3451 value_from_history_ref (const char *h, const char **endp)
3452 {
3453 int index, len;
3454
3455 if (h[0] == '$')
3456 len = 1;
3457 else
3458 return NULL;
3459
3460 if (h[1] == '$')
3461 len = 2;
3462
3463 /* Find length of numeral string. */
3464 for (; isdigit (h[len]); len++)
3465 ;
3466
3467 /* Make sure numeral string is not part of an identifier. */
3468 if (h[len] == '_' || isalpha (h[len]))
3469 return NULL;
3470
3471 /* Now collect the index value. */
3472 if (h[1] == '$')
3473 {
3474 if (len == 2)
3475 {
3476 /* For some bizarre reason, "$$" is equivalent to "$$1",
3477 rather than to "$$0" as it ought to be! */
3478 index = -1;
3479 *endp += len;
3480 }
3481 else
3482 {
3483 char *local_end;
3484
3485 index = -strtol (&h[2], &local_end, 10);
3486 *endp = local_end;
3487 }
3488 }
3489 else
3490 {
3491 if (len == 1)
3492 {
3493 /* "$" is equivalent to "$0". */
3494 index = 0;
3495 *endp += len;
3496 }
3497 else
3498 {
3499 char *local_end;
3500
3501 index = strtol (&h[1], &local_end, 10);
3502 *endp = local_end;
3503 }
3504 }
3505
3506 return access_value_history (index);
3507 }
3508
3509 struct value *
3510 coerce_ref_if_computed (const struct value *arg)
3511 {
3512 const struct lval_funcs *funcs;
3513
3514 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3515 return NULL;
3516
3517 if (value_lval_const (arg) != lval_computed)
3518 return NULL;
3519
3520 funcs = value_computed_funcs (arg);
3521 if (funcs->coerce_ref == NULL)
3522 return NULL;
3523
3524 return funcs->coerce_ref (arg);
3525 }
3526
3527 /* Look at value.h for description. */
3528
3529 struct value *
3530 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3531 struct type *original_type,
3532 struct value *original_value)
3533 {
3534 /* Re-adjust type. */
3535 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3536
3537 /* Add embedding info. */
3538 set_value_enclosing_type (value, enc_type);
3539 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3540
3541 /* We may be pointing to an object of some derived type. */
3542 return value_full_object (value, NULL, 0, 0, 0);
3543 }
3544
3545 struct value *
3546 coerce_ref (struct value *arg)
3547 {
3548 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3549 struct value *retval;
3550 struct type *enc_type;
3551
3552 retval = coerce_ref_if_computed (arg);
3553 if (retval)
3554 return retval;
3555
3556 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3557 return arg;
3558
3559 enc_type = check_typedef (value_enclosing_type (arg));
3560 enc_type = TYPE_TARGET_TYPE (enc_type);
3561
3562 retval = value_at_lazy (enc_type,
3563 unpack_pointer (value_type (arg),
3564 value_contents (arg)));
3565 enc_type = value_type (retval);
3566 return readjust_indirect_value_type (retval, enc_type,
3567 value_type_arg_tmp, arg);
3568 }
3569
3570 struct value *
3571 coerce_array (struct value *arg)
3572 {
3573 struct type *type;
3574
3575 arg = coerce_ref (arg);
3576 type = check_typedef (value_type (arg));
3577
3578 switch (TYPE_CODE (type))
3579 {
3580 case TYPE_CODE_ARRAY:
3581 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3582 arg = value_coerce_array (arg);
3583 break;
3584 case TYPE_CODE_FUNC:
3585 arg = value_coerce_function (arg);
3586 break;
3587 }
3588 return arg;
3589 }
3590 \f
3591
3592 /* Return the return value convention that will be used for the
3593 specified type. */
3594
3595 enum return_value_convention
3596 struct_return_convention (struct gdbarch *gdbarch,
3597 struct value *function, struct type *value_type)
3598 {
3599 enum type_code code = TYPE_CODE (value_type);
3600
3601 if (code == TYPE_CODE_ERROR)
3602 error (_("Function return type unknown."));
3603
3604 /* Probe the architecture for the return-value convention. */
3605 return gdbarch_return_value (gdbarch, function, value_type,
3606 NULL, NULL, NULL);
3607 }
3608
3609 /* Return true if the function returning the specified type is using
3610 the convention of returning structures in memory (passing in the
3611 address as a hidden first parameter). */
3612
3613 int
3614 using_struct_return (struct gdbarch *gdbarch,
3615 struct value *function, struct type *value_type)
3616 {
3617 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3618 /* A void return value is never in memory. See also corresponding
3619 code in "print_return_value". */
3620 return 0;
3621
3622 return (struct_return_convention (gdbarch, function, value_type)
3623 != RETURN_VALUE_REGISTER_CONVENTION);
3624 }
3625
3626 /* Set the initialized field in a value struct. */
3627
3628 void
3629 set_value_initialized (struct value *val, int status)
3630 {
3631 val->initialized = status;
3632 }
3633
3634 /* Return the initialized field in a value struct. */
3635
3636 int
3637 value_initialized (struct value *val)
3638 {
3639 return val->initialized;
3640 }
3641
3642 /* Called only from the value_contents and value_contents_all()
3643 macros, if the current data for a variable needs to be loaded into
3644 value_contents(VAL). Fetches the data from the user's process, and
3645 clears the lazy flag to indicate that the data in the buffer is
3646 valid.
3647
3648 If the value is zero-length, we avoid calling read_memory, which
3649 would abort. We mark the value as fetched anyway -- all 0 bytes of
3650 it.
3651
3652 This function returns a value because it is used in the
3653 value_contents macro as part of an expression, where a void would
3654 not work. The value is ignored. */
3655
3656 int
3657 value_fetch_lazy (struct value *val)
3658 {
3659 gdb_assert (value_lazy (val));
3660 allocate_value_contents (val);
3661 if (value_bitsize (val))
3662 {
3663 /* To read a lazy bitfield, read the entire enclosing value. This
3664 prevents reading the same block of (possibly volatile) memory once
3665 per bitfield. It would be even better to read only the containing
3666 word, but we have no way to record that just specific bits of a
3667 value have been fetched. */
3668 struct type *type = check_typedef (value_type (val));
3669 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3670 struct value *parent = value_parent (val);
3671 LONGEST offset = value_offset (val);
3672 LONGEST num;
3673
3674 if (value_lazy (parent))
3675 value_fetch_lazy (parent);
3676
3677 if (!value_bits_valid (parent,
3678 TARGET_CHAR_BIT * offset + value_bitpos (val),
3679 value_bitsize (val)))
3680 set_value_optimized_out (val, 1);
3681 else if (!unpack_value_bits_as_long (value_type (val),
3682 value_contents_for_printing (parent),
3683 offset,
3684 value_bitpos (val),
3685 value_bitsize (val), parent, &num))
3686 mark_value_bytes_unavailable (val,
3687 value_embedded_offset (val),
3688 TYPE_LENGTH (type));
3689 else
3690 store_signed_integer (value_contents_raw (val), TYPE_LENGTH (type),
3691 byte_order, num);
3692 }
3693 else if (VALUE_LVAL (val) == lval_memory)
3694 {
3695 CORE_ADDR addr = value_address (val);
3696 struct type *type = check_typedef (value_enclosing_type (val));
3697
3698 if (TYPE_LENGTH (type))
3699 read_value_memory (val, 0, value_stack (val),
3700 addr, value_contents_all_raw (val),
3701 TYPE_LENGTH (type));
3702 }
3703 else if (VALUE_LVAL (val) == lval_register)
3704 {
3705 struct frame_info *frame;
3706 int regnum;
3707 struct type *type = check_typedef (value_type (val));
3708 struct value *new_val = val, *mark = value_mark ();
3709
3710 /* Offsets are not supported here; lazy register values must
3711 refer to the entire register. */
3712 gdb_assert (value_offset (val) == 0);
3713
3714 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3715 {
3716 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3717
3718 frame = frame_find_by_id (frame_id);
3719 regnum = VALUE_REGNUM (new_val);
3720
3721 gdb_assert (frame != NULL);
3722
3723 /* Convertible register routines are used for multi-register
3724 values and for interpretation in different types
3725 (e.g. float or int from a double register). Lazy
3726 register values should have the register's natural type,
3727 so they do not apply. */
3728 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3729 regnum, type));
3730
3731 new_val = get_frame_register_value (frame, regnum);
3732
3733 /* If we get another lazy lval_register value, it means the
3734 register is found by reading it from the next frame.
3735 get_frame_register_value should never return a value with
3736 the frame id pointing to FRAME. If it does, it means we
3737 either have two consecutive frames with the same frame id
3738 in the frame chain, or some code is trying to unwind
3739 behind get_prev_frame's back (e.g., a frame unwind
3740 sniffer trying to unwind), bypassing its validations. In
3741 any case, it should always be an internal error to end up
3742 in this situation. */
3743 if (VALUE_LVAL (new_val) == lval_register
3744 && value_lazy (new_val)
3745 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3746 internal_error (__FILE__, __LINE__,
3747 _("infinite loop while fetching a register"));
3748 }
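/* Editor's note (illustrative, following the comments above and below):
when the requested register was saved by a callee, the loop above
typically ends with NEW_VAL being a possibly-lazy lval_memory value
backed by the save slot in an outer frame; that lazy case is handled
by the explicit fetch just below. */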
3749
3750 /* If it's still lazy (for instance, a saved register on the
3751 stack), fetch it. */
3752 if (value_lazy (new_val))
3753 value_fetch_lazy (new_val);
3754
3755 /* If the register was not saved, mark it optimized out. */
3756 if (value_optimized_out (new_val))
3757 set_value_optimized_out (val, 1);
3758 else
3759 {
3760 set_value_lazy (val, 0);
3761 value_contents_copy (val, value_embedded_offset (val),
3762 new_val, value_embedded_offset (new_val),
3763 TYPE_LENGTH (type));
3764 }
3765
3766 if (frame_debug)
3767 {
3768 struct gdbarch *gdbarch;
3769 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3770 regnum = VALUE_REGNUM (val);
3771 gdbarch = get_frame_arch (frame);
3772
3773 fprintf_unfiltered (gdb_stdlog,
3774 "{ value_fetch_lazy "
3775 "(frame=%d,regnum=%d(%s),...) ",
3776 frame_relative_level (frame), regnum,
3777 user_reg_map_regnum_to_name (gdbarch, regnum));
3778
3779 fprintf_unfiltered (gdb_stdlog, "->");
3780 if (value_optimized_out (new_val))
3781 {
3782 fprintf_unfiltered (gdb_stdlog, " ");
3783 val_print_optimized_out (new_val, gdb_stdlog);
3784 }
3785 else
3786 {
3787 int i;
3788 const gdb_byte *buf = value_contents (new_val);
3789
3790 if (VALUE_LVAL (new_val) == lval_register)
3791 fprintf_unfiltered (gdb_stdlog, " register=%d",
3792 VALUE_REGNUM (new_val));
3793 else if (VALUE_LVAL (new_val) == lval_memory)
3794 fprintf_unfiltered (gdb_stdlog, " address=%s",
3795 paddress (gdbarch,
3796 value_address (new_val)));
3797 else
3798 fprintf_unfiltered (gdb_stdlog, " computed");
3799
3800 fprintf_unfiltered (gdb_stdlog, " bytes=");
3801 fprintf_unfiltered (gdb_stdlog, "[");
3802 for (i = 0; i < register_size (gdbarch, regnum); i++)
3803 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3804 fprintf_unfiltered (gdb_stdlog, "]");
3805 }
3806
3807 fprintf_unfiltered (gdb_stdlog, " }\n");
3808 }
3809
3810 /* Dispose of the intermediate values. This prevents
3811 watchpoints from trying to watch the saved frame pointer. */
3812 value_free_to_mark (mark);
3813 }
3814 else if (VALUE_LVAL (val) == lval_computed
3815 && value_computed_funcs (val)->read != NULL)
3816 value_computed_funcs (val)->read (val);
3817 /* Don't call value_optimized_out on val; doing so would result in a
3818 recursive call back to value_fetch_lazy. Instead, check the
3819 optimized_out flag directly. */
3820 else if (val->optimized_out)
3821 /* Keep it optimized out. */;
3822 else
3823 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3824
3825 set_value_lazy (val, 0);
3826 return 0;
3827 }
3828
3829 /* Implementation of the convenience function $_isvoid. */
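/* Editor's note -- usage illustration (sample session, output
hypothetical):

     (gdb) print $_isvoid ($unset_var)
     $1 = 1
     (gdb) set $unset_var = 42
     (gdb) print $_isvoid ($unset_var)
     $2 = 0

An unassigned convenience variable has void type, so $_isvoid reports
1 for it until a value is stored. */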
3830
3831 static struct value *
3832 isvoid_internal_fn (struct gdbarch *gdbarch,
3833 const struct language_defn *language,
3834 void *cookie, int argc, struct value **argv)
3835 {
3836 int ret;
3837
3838 if (argc != 1)
3839 error (_("You must provide one argument for $_isvoid."));
3840
3841 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3842
3843 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3844 }
3845
3846 void
3847 _initialize_values (void)
3848 {
3849 add_cmd ("convenience", no_class, show_convenience, _("\
3850 Debugger convenience (\"$foo\") variables and functions.\n\
3851 Convenience variables are created when you assign them values;\n\
3852 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3853 \n\
3854 A few convenience variables are given values automatically:\n\
3855 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3856 \"$__\" holds the contents of the last address examined with \"x\"."
3857 #ifdef HAVE_PYTHON
3858 "\n\n\
3859 Convenience functions are defined via the Python API."
3860 #endif
3861 ), &showlist);
3862 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3863
3864 add_cmd ("values", no_set_class, show_values, _("\
3865 Elements of value history around item number IDX (or last ten)."),
3866 &showlist);
3867
3868 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
3869 Initialize a convenience variable if necessary.\n\
3870 init-if-undefined VARIABLE = EXPRESSION\n\
3871 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
3872 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
3873 VARIABLE is already initialized."));
3874
3875 add_prefix_cmd ("function", no_class, function_command, _("\
3876 Placeholder command for showing help on convenience functions."),
3877 &functionlist, "function ", 0, &cmdlist);
3878
3879 add_internal_function ("_isvoid", _("\
3880 Check whether an expression is void.\n\
3881 Usage: $_isvoid (expression)\n\
3882 Return 1 if the expression is void, zero otherwise."),
3883 isvoid_internal_fn, NULL);
3884 }