/* Low level packing and unpacking of values for GDB, the GNU Debugger.
- Copyright (C) 1986-2017 Free Software Foundation, Inc.
+ Copyright (C) 1986-2021 Free Software Foundation, Inc.
This file is part of GDB.
#include "user-regs.h"
#include <algorithm>
#include "completer.h"
+#include "gdbsupport/selftest.h"
+#include "gdbsupport/array-view.h"
+#include "cli/cli-style.h"
+#include "expop.h"
+#include "inferior.h"
/* Definition of a user function. */
struct internal_function
/* Length of the range. */
LONGEST length;
-};
-typedef struct range range_s;
+ /* Returns true if THIS is strictly less than OTHER, useful for
+ searching. We keep ranges sorted by offset and coalesce
+ overlapping and contiguous ranges, so this just compares the
+ starting offset. */
+
+ bool operator< (const range &other) const
+ {
+ return offset < other.offset;
+ }
-DEF_VEC_O(range_s);
+ /* Returns true if THIS is equal to OTHER. */
+ bool operator== (const range &other) const
+ {
+ return offset == other.offset && length == other.length;
+ }
+};
/* Returns true if the ranges defined by [offset1, offset1+len1) and
[offset2, offset2+len2) overlap. */
return (l < h);
}
-/* Returns true if the first argument is strictly less than the
- second, useful for VEC_lower_bound. We keep ranges sorted by
- offset and coalesce overlapping and contiguous ranges, so this just
- compares the starting offset. */
-
-static int
-range_lessthan (const range_s *r1, const range_s *r2)
-{
- return r1->offset < r2->offset;
-}
-
/* Returns true if RANGES contains any range that overlaps [OFFSET,
OFFSET+LENGTH). */
static int
-ranges_contain (VEC(range_s) *ranges, LONGEST offset, LONGEST length)
+ranges_contain (const std::vector<range> &ranges, LONGEST offset,
+ LONGEST length)
{
- range_s what;
- LONGEST i;
+ range what;
what.offset = offset;
what.length = length;
care for (this is a range afterall), we need to check if the
_previous_ range overlaps the I range. E.g.,
- R
- |---|
+ R
+ |---|
|---| |---| |------| ... |--|
0 1 2 N
Then we need to check if the I range overlaps the I range itself.
E.g.,
- R
- |---|
+ R
+ |---|
|---| |---| |-------| ... |--|
0 1 2 N
I=1
*/
- i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
- if (i > 0)
+ auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
+
+ if (i > ranges.begin ())
{
- struct range *bef = VEC_index (range_s, ranges, i - 1);
+ const struct range &bef = *(i - 1);
- if (ranges_overlap (bef->offset, bef->length, offset, length))
+ if (ranges_overlap (bef.offset, bef.length, offset, length))
return 1;
}
- if (i < VEC_length (range_s, ranges))
+ if (i < ranges.end ())
{
- struct range *r = VEC_index (range_s, ranges, i);
+ const struct range &r = *i;
- if (ranges_overlap (r->offset, r->length, offset, length))
+ if (ranges_overlap (r.offset, r.length, offset, length))
return 1;
}
struct value
{
+ explicit value (struct type *type_)
+ : modifiable (1),
+ lazy (1),
+ initialized (1),
+ stack (0),
+ type (type_),
+ enclosing_type (type_)
+ {
+ }
+
+ ~value ()
+ {
+ if (VALUE_LVAL (this) == lval_computed)
+ {
+ const struct lval_funcs *funcs = location.computed.funcs;
+
+ if (funcs->free_closure)
+ funcs->free_closure (this);
+ }
+ else if (VALUE_LVAL (this) == lval_xcallable)
+ delete location.xm_worker;
+ }
+
+ DISABLE_COPY_AND_ASSIGN (value);
+
/* Type of value; either not an lval, or one of the various
different possible kinds of lval. */
- enum lval_type lval;
+ enum lval_type lval = not_lval;
/* Is it modifiable? Only relevant if lval != not_lval. */
unsigned int modifiable : 1;
used instead of read_memory to enable extra caching. */
unsigned int stack : 1;
- /* If the value has been released. */
- unsigned int released : 1;
-
/* Location of value (if lval). */
union
{
/* Closure for those functions to use. */
void *closure;
} computed;
- } location;
+ } location {};
/* Describes offset of a value within lval of a structure in target
addressable memory units. Note also the member embedded_offset
below. */
- LONGEST offset;
+ LONGEST offset = 0;
/* Only used for bitfields; number of bits contained in them. */
- LONGEST bitsize;
+ LONGEST bitsize = 0;
/* Only used for bitfields; position of start of field. For
- gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
- gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
- LONGEST bitpos;
+ little-endian targets, it is the position of the LSB. For
+ big-endian targets, it is the position of the MSB. */
+ LONGEST bitpos = 0;
/* The number of references to this value. When a value is created,
the value chain holds a reference, so REFERENCE_COUNT is 1. If
release_value is called, this value is removed from the chain but
the caller of release_value now has a reference to this value.
The caller must arrange for a call to value_free later. */
- int reference_count;
+ int reference_count = 1;
/* Only used for bitfields; the containing value. This allows a
single read from the target when displaying multiple
bitfields. */
- struct value *parent;
+ value_ref_ptr parent;
/* Type of the value. */
struct type *type;
`type', and `embedded_offset' is zero, so everything works
normally. */
struct type *enclosing_type;
- LONGEST embedded_offset;
- LONGEST pointed_to_offset;
-
- /* Values are stored in a chain, so that they can be deleted easily
- over calls to the inferior. Values assigned to internal
- variables, put into the value history or exposed to Python are
- taken off this list. */
- struct value *next;
+ LONGEST embedded_offset = 0;
+ LONGEST pointed_to_offset = 0;
/* Actual contents of the value. Target byte-order. NULL or not
valid if lazy is nonzero. */
- gdb_byte *contents;
+ gdb::unique_xmalloc_ptr<gdb_byte> contents;
/* Unavailable ranges in CONTENTS. We mark unavailable ranges,
rather than available, since the common and default case is for a
The unavailable ranges are tracked in bits. Note that a contents
bit that has been optimized out doesn't really exist in the
program, so it can't be marked unavailable either. */
- VEC(range_s) *unavailable;
+ std::vector<range> unavailable;
/* Likewise, but for optimized out contents (a chunk of the value of
a variable that does not actually exist in the program). If LVAL
saved registers and optimized-out program variables values are
treated pretty much the same, except not-saved registers have a
different string representation and related error strings. */
- VEC(range_s) *optimized_out;
+ std::vector<range> optimized_out;
};
/* See value.h. */
struct gdbarch *
get_value_arch (const struct value *value)
{
- return get_type_arch (value_type (value));
+ return value_type (value)->arch ();
}
int
if (value->lazy)
value_fetch_lazy (value);
- if (VEC_empty (range_s, value->unavailable))
+ if (value->unavailable.empty ())
return 1;
return 0;
}
static int
value_entirely_covered_by_range_vector (struct value *value,
- VEC(range_s) **ranges)
+ const std::vector<range> &ranges)
{
/* We can only tell whether the whole value is optimized out /
unavailable when we try to read it. */
if (value->lazy)
value_fetch_lazy (value);
- if (VEC_length (range_s, *ranges) == 1)
+ if (ranges.size () == 1)
{
- struct range *t = VEC_index (range_s, *ranges, 0);
+ const struct range &t = ranges[0];
- if (t->offset == 0
- && t->length == (TARGET_CHAR_BIT
- * TYPE_LENGTH (value_enclosing_type (value))))
+ if (t.offset == 0
+ && t.length == (TARGET_CHAR_BIT
+ * TYPE_LENGTH (value_enclosing_type (value))))
return 1;
}
int
value_entirely_unavailable (struct value *value)
{
- return value_entirely_covered_by_range_vector (value, &value->unavailable);
+ return value_entirely_covered_by_range_vector (value, value->unavailable);
}
int
value_entirely_optimized_out (struct value *value)
{
- return value_entirely_covered_by_range_vector (value, &value->optimized_out);
+ return value_entirely_covered_by_range_vector (value, value->optimized_out);
}
/* Insert into the vector pointed to by VECTORP the bit range starting of
OFFSET bits, and extending for the next LENGTH bits. */
static void
-insert_into_bit_range_vector (VEC(range_s) **vectorp,
+insert_into_bit_range_vector (std::vector<range> *vectorp,
LONGEST offset, LONGEST length)
{
- range_s newr;
- int i;
+ range newr;
/* Insert the range sorted. If there's overlap or the new range
would be contiguous with an existing range, merge. */
R
|-...-|
- |--| |---| |------| ... |--|
- 0 1 2 N
+ |--| |---| |------| ... |--|
+ 0 1 2 N
I=0
R
|------------------------|
- |--| |---| |------| ... |--|
- 0 1 2 N
+ |--| |---| |------| ... |--|
+ 0 1 2 N
I=0
*/
- i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
- if (i > 0)
+ auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
+ if (i > vectorp->begin ())
{
- struct range *bef = VEC_index (range_s, *vectorp, i - 1);
+ struct range &bef = *(i - 1);
- if (ranges_overlap (bef->offset, bef->length, offset, length))
+ if (ranges_overlap (bef.offset, bef.length, offset, length))
{
/* #1 */
- ULONGEST l = std::min (bef->offset, offset);
- ULONGEST h = std::max (bef->offset + bef->length, offset + length);
+ ULONGEST l = std::min (bef.offset, offset);
+ ULONGEST h = std::max (bef.offset + bef.length, offset + length);
- bef->offset = l;
- bef->length = h - l;
+ bef.offset = l;
+ bef.length = h - l;
i--;
}
- else if (offset == bef->offset + bef->length)
+ else if (offset == bef.offset + bef.length)
{
/* #2 */
- bef->length += length;
+ bef.length += length;
i--;
}
else
{
/* #3 */
- VEC_safe_insert (range_s, *vectorp, i, &newr);
+ i = vectorp->insert (i, newr);
}
}
else
{
/* #4 */
- VEC_safe_insert (range_s, *vectorp, i, &newr);
+ i = vectorp->insert (i, newr);
}
/* Check whether the ranges following the one we've just added or
touched can be folded in (#5 above). */
- if (i + 1 < VEC_length (range_s, *vectorp))
+ if (i != vectorp->end () && i + 1 < vectorp->end ())
{
- struct range *t;
- struct range *r;
int removed = 0;
- int next = i + 1;
+ auto next = i + 1;
/* Get the range we just touched. */
- t = VEC_index (range_s, *vectorp, i);
+ struct range &t = *i;
removed = 0;
i = next;
- for (; VEC_iterate (range_s, *vectorp, i, r); i++)
- if (r->offset <= t->offset + t->length)
- {
- ULONGEST l, h;
+ for (; i < vectorp->end (); i++)
+ {
+ struct range &r = *i;
+ if (r.offset <= t.offset + t.length)
+ {
+ ULONGEST l, h;
- l = std::min (t->offset, r->offset);
- h = std::max (t->offset + t->length, r->offset + r->length);
+ l = std::min (t.offset, r.offset);
+ h = std::max (t.offset + t.length, r.offset + r.length);
- t->offset = l;
- t->length = h - l;
+ t.offset = l;
+ t.length = h - l;
- removed++;
- }
- else
- {
- /* If we couldn't merge this one, we won't be able to
- merge following ones either, since the ranges are
- always sorted by OFFSET. */
- break;
- }
+ removed++;
+ }
+ else
+ {
+ /* If we couldn't merge this one, we won't be able to
+ merge following ones either, since the ranges are
+ always sorted by OFFSET. */
+ break;
+ }
+ }
if (removed != 0)
- VEC_block_remove (range_s, *vectorp, next, removed);
+ vectorp->erase (next, next + removed);
}
}
found, or -1 if none was found. */
static int
-find_first_range_overlap (VEC(range_s) *ranges, int pos,
+find_first_range_overlap (const std::vector<range> *ranges, int pos,
LONGEST offset, LONGEST length)
{
- range_s *r;
int i;
- for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
- if (ranges_overlap (r->offset, r->length, offset, length))
- return i;
+ for (i = pos; i < ranges->size (); i++)
+ {
+ const range &r = (*ranges)[i];
+ if (ranges_overlap (r.offset, r.length, offset, length))
+ return i;
+ }
return -1;
}
PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
to:
PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
- / TARGET_CHAR_BIT) */
+ / TARGET_CHAR_BIT) */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
const gdb_byte *ptr2, size_t offset2_bits,
struct ranges_and_idx
{
/* The ranges. */
- VEC(range_s) *ranges;
+ const std::vector<range> *ranges;
/* The range we've last found in RANGES. Given ranges are sorted,
we can start the next lookup here. */
return 0;
else
{
- range_s *r1, *r2;
+ const range *r1, *r2;
ULONGEST l1, h1;
ULONGEST l2, h2;
- r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
- r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
+ r1 = &(*rp1->ranges)[rp1->idx];
+ r2 = &(*rp2->ranges)[rp2->idx];
/* Get the unavailable windows intersected by the incoming
ranges. The first and last ranges that overlap the argument
memset (&rp1, 0, sizeof (rp1));
memset (&rp2, 0, sizeof (rp2));
- rp1[0].ranges = val1->unavailable;
- rp2[0].ranges = val2->unavailable;
- rp1[1].ranges = val1->optimized_out;
- rp2[1].ranges = val2->optimized_out;
+ rp1[0].ranges = &val1->unavailable;
+ rp2[0].ranges = &val2->unavailable;
+ rp1[1].ranges = &val1->optimized_out;
+ rp2[1].ranges = &val2->optimized_out;
while (length > 0)
{
}
/* Compare the available/valid contents. */
- if (memcmp_with_bit_offsets (val1->contents, offset1,
- val2->contents, offset2, l) != 0)
+ if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
+ val2->contents.get (), offset2, l) != 0)
return false;
length -= h;
}
-/* The value-history records all the values printed
- by print commands during this session. Each chunk
- records 60 consecutive values. The first chunk on
- the chain records the most recent values.
- The total number of values is in value_history_count. */
-
-#define VALUE_HISTORY_CHUNK 60
-
-struct value_history_chunk
- {
- struct value_history_chunk *next;
- struct value *values[VALUE_HISTORY_CHUNK];
- };
+/* The value-history records all the values printed by print commands
+ during this session. */
-/* Chain of chunks now in use. */
-
-static struct value_history_chunk *value_history_chain;
-
-static int value_history_count; /* Abs number of last entry stored. */
+static std::vector<value_ref_ptr> value_history;
\f
/* List of all value objects currently allocated
(except for those released by calls to release_value)
This is so they can be freed after each command. */
-static struct value *all_values;
+static std::vector<value_ref_ptr> all_values;
/* Allocate a lazy value for type TYPE. Its actual content is
"lazily" allocated too: the content field of the return value is
description correctly. */
check_typedef (type);
- val = XCNEW (struct value);
- val->contents = NULL;
- val->next = all_values;
- all_values = val;
- val->type = type;
- val->enclosing_type = type;
- VALUE_LVAL (val) = not_lval;
- val->location.address = 0;
- val->offset = 0;
- val->bitpos = 0;
- val->bitsize = 0;
- val->lazy = 1;
- val->embedded_offset = 0;
- val->pointed_to_offset = 0;
- val->modifiable = 1;
- val->initialized = 1; /* Default to initialized. */
+ val = new struct value (type);
/* Values start out on the all_values chain. */
- val->reference_count = 1;
+ all_values.emplace_back (val);
return val;
}
static void
check_type_length_before_alloc (const struct type *type)
{
- unsigned int length = TYPE_LENGTH (type);
+ ULONGEST length = TYPE_LENGTH (type);
if (max_value_size > -1 && length > max_value_size)
{
- if (TYPE_NAME (type) != NULL)
- error (_("value of type `%s' requires %u bytes, which is more "
- "than max-value-size"), TYPE_NAME (type), length);
+ if (type->name () != NULL)
+ error (_("value of type `%s' requires %s bytes, which is more "
+ "than max-value-size"), type->name (), pulongest (length));
else
- error (_("value requires %u bytes, which is more than "
- "max-value-size"), length);
+ error (_("value requires %s bytes, which is more than "
+ "max-value-size"), pulongest (length));
}
}
if (!val->contents)
{
check_type_length_before_alloc (val->enclosing_type);
- val->contents
- = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
+ val->contents.reset
+ ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
}
}
struct value *
allocate_repeat_value (struct type *type, int count)
{
- int low_bound = current_language->string_lower_bound; /* ??? */
+ /* Despite the fact that we are really creating an array of TYPE here, we
+ use the string lower bound as the array lower bound. This seems to
+ work fine for now. */
+ int low_bound = current_language->string_lower_bound ();
/* FIXME-type-allocation: need a way to free this type when we are
done with it. */
struct type *array_type
struct value *
allocate_computed_value (struct type *type,
- const struct lval_funcs *funcs,
- void *closure)
+ const struct lval_funcs *funcs,
+ void *closure)
{
struct value *v = allocate_value_lazy (type);
/* Accessor methods. */
-struct value *
-value_next (const struct value *value)
-{
- return value->next;
-}
-
struct type *
value_type (const struct value *value)
{
struct value *
value_parent (const struct value *value)
{
- return value->parent;
+ return value->parent.get ();
}
/* See value.h. */
void
set_value_parent (struct value *value, struct value *parent)
{
  /* new_reference acquires a new reference to PARENT; assigning to
     the value_ref_ptr member drops any previous parent reference
     automatically.  */
  value->parent = value_ref_ptr::new_reference (parent);
}
gdb_byte *
int unit_size = gdbarch_addressable_memory_unit_size (arch);
allocate_value_contents (value);
- return value->contents + value->embedded_offset * unit_size;
+ return value->contents.get () + value->embedded_offset * unit_size;
}
gdb_byte *
value_contents_all_raw (struct value *value)
{
  /* Make sure the contents buffer exists before handing out a raw
     pointer into it.  */
  allocate_value_contents (value);
  return value->contents.get ();
}
struct type *
{
/* If result's target type is TYPE_CODE_STRUCT, proceed to
fetch its rtti type. */
- if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
- && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
- == TYPE_CODE_STRUCT
+ if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
+ && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
+ == TYPE_CODE_STRUCT)
&& !value_optimized_out (value))
- {
- struct type *real_type;
-
- real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
- if (real_type)
- {
- if (real_type_found)
- *real_type_found = 1;
- result = real_type;
- }
- }
+ {
+ struct type *real_type;
+
+ real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
+ if (real_type)
+ {
+ if (real_type_found)
+ *real_type_found = 1;
+ result = real_type;
+ }
+ }
else if (resolve_simple_types)
- {
- if (real_type_found)
- *real_type_found = 1;
- result = value_enclosing_type (value);
- }
+ {
+ if (real_type_found)
+ *real_type_found = 1;
+ result = value_enclosing_type (value);
+ }
}
return result;
static void
require_not_optimized_out (const struct value *value)
{
- if (!VEC_empty (range_s, value->optimized_out))
+ if (!value->optimized_out.empty ())
{
if (value->lval == lval_register)
error (_("register has not been saved in frame"));
static void
require_available (const struct value *value)
{
- if (!VEC_empty (range_s, value->unavailable))
+ if (!value->unavailable.empty ())
throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
{
if (value->lazy)
value_fetch_lazy (value);
- return value->contents;
+ return value->contents.get ();
}
const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  /* A const value cannot be fetched here, so it must already have
     been fetched by the caller.  */
  gdb_assert (!value->lazy);
  return value->contents.get ();
}
const gdb_byte *
SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
static void
-ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
- VEC (range_s) *src_range, int src_bit_offset,
+ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
+ const std::vector<range> &src_range, int src_bit_offset,
int bit_length)
{
- range_s *r;
- int i;
-
- for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
+ for (const range &r : src_range)
{
ULONGEST h, l;
- l = std::max (r->offset, (LONGEST) src_bit_offset);
- h = std::min (r->offset + r->length,
+ l = std::max (r.offset, (LONGEST) src_bit_offset);
+ h = std::min (r.offset + r.length,
(LONGEST) src_bit_offset + bit_length);
if (l < h)
It is assumed the contents of DST in the [DST_OFFSET,
DST_OFFSET+LENGTH) range are wholly available. */
-void
+static void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
struct value *src, LONGEST src_offset, LONGEST length)
{
{
/* We can only know if a value is optimized out once we have tried to
fetch it. */
- if (VEC_empty (range_s, value->optimized_out) && value->lazy)
+ if (value->optimized_out.empty () && value->lazy)
{
- TRY
+ try
{
value_fetch_lazy (value);
}
- CATCH (ex, RETURN_MASK_ERROR)
+ catch (const gdb_exception_error &ex)
{
- /* Fall back to checking value->optimized_out. */
+ switch (ex.error)
+ {
+ case MEMORY_ERROR:
+ case OPTIMIZED_OUT_ERROR:
+ case NOT_AVAILABLE_ERROR:
+ /* These can normally happen when we try to access an
+ optimized out or unavailable register, either in a
+ physical register or spilled to memory. */
+ break;
+ default:
+ throw;
+ }
}
- END_CATCH
}
- return !VEC_empty (range_s, value->optimized_out);
+ return !value->optimized_out.empty ();
}
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
if (value->lval != lval_memory)
return 0;
if (value->parent != NULL)
- return value_address (value->parent) + value->offset;
+ return value_address (value->parent.get ()) + value->offset;
if (NULL != TYPE_DATA_LOCATION (value_type (value)))
{
gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
struct value *
value_mark (void)
{
- return all_values;
+ if (all_values.empty ())
+ return nullptr;
+ return all_values.back ().get ();
}
-/* Take a reference to VAL. VAL will not be deallocated until all
- references are released. */
+/* See value.h. */
void
value_incref (struct value *val)
chain. */
void
-value_free (struct value *val)
+value_decref (struct value *val)
{
- if (val)
+ if (val != nullptr)
{
gdb_assert (val->reference_count > 0);
val->reference_count--;
- if (val->reference_count > 0)
- return;
-
- /* If there's an associated parent value, drop our reference to
- it. */
- if (val->parent != NULL)
- value_free (val->parent);
-
- if (VALUE_LVAL (val) == lval_computed)
- {
- const struct lval_funcs *funcs = val->location.computed.funcs;
-
- if (funcs->free_closure)
- funcs->free_closure (val);
- }
- else if (VALUE_LVAL (val) == lval_xcallable)
- free_xmethod_worker (val->location.xm_worker);
-
- xfree (val->contents);
- VEC_free (range_s, val->unavailable);
+ if (val->reference_count == 0)
+ delete val;
}
- xfree (val);
}
/* Free all values allocated since MARK was obtained by value_mark
void
value_free_to_mark (const struct value *mark)
{
  /* Find MARK on the chain; everything allocated after it (i.e.,
     after its position in the vector) is released.  If MARK is not
     found (e.g., a NULL mark from an empty chain), free everything.  */
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    all_values.clear ();
  else
    all_values.erase (iter + 1, all_values.end ());
}
/* Remove VAL from the chain all_values
so it will not be freed automatically. */
-void
+value_ref_ptr
release_value (struct value *val)
{
- struct value *v;
-
- if (all_values == val)
- {
- all_values = val->next;
- val->next = NULL;
- val->released = 1;
- return;
- }
+ if (val == nullptr)
+ return value_ref_ptr ();
- for (v = all_values; v; v = v->next)
+ std::vector<value_ref_ptr>::reverse_iterator iter;
+ for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
{
- if (v->next == val)
+ if (*iter == val)
{
- v->next = val->next;
- val->next = NULL;
- val->released = 1;
- break;
+ value_ref_ptr result = *iter;
+ all_values.erase (iter.base () - 1);
+ return result;
}
}
-}
-/* If the value is not already released, release it.
- If the value is already released, increment its reference count.
- That is, this function ensures that the value is released from the
- value chain and that the caller owns a reference to it. */
-
-void
-release_value_or_incref (struct value *val)
-{
- if (val->released)
- value_incref (val);
- else
- release_value (val);
+ /* We must always return an owned reference. Normally this happens
+ because we transfer the reference from the value chain, but in
+ this case the value was not on the chain. */
+ return value_ref_ptr::new_reference (val);
}
-/* Release all values up to mark */
-struct value *
+/* See value.h. */
+
+std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
- struct value *val;
- struct value *next;
+ std::vector<value_ref_ptr> result;
- for (val = next = all_values; next; next = next->next)
+ auto iter = std::find (all_values.begin (), all_values.end (), mark);
+ if (iter == all_values.end ())
+ std::swap (result, all_values);
+ else
{
- if (next->next == mark)
- {
- all_values = next->next;
- next->next = NULL;
- return val;
- }
- next->released = 1;
+ std::move (iter + 1, all_values.end (), std::back_inserter (result));
+ all_values.erase (iter + 1, all_values.end ());
}
- all_values = 0;
- return val;
+ std::reverse (result.begin (), result.end ());
+ return result;
}
/* Return a copy of the value ARG.
TYPE_LENGTH (value_enclosing_type (arg)));
}
- val->unavailable = VEC_copy (range_s, arg->unavailable);
- val->optimized_out = VEC_copy (range_s, arg->optimized_out);
- set_value_parent (val, arg->parent);
+ val->unavailable = arg->unavailable;
+ val->optimized_out = arg->optimized_out;
+ val->parent = arg->parent;
if (VALUE_LVAL (val) == lval_computed)
{
const struct lval_funcs *funcs = val->location.computed.funcs;
if (funcs->copy_closure)
- val->location.computed.closure = funcs->copy_closure (val);
+ val->location.computed.closure = funcs->copy_closure (val);
}
return val;
}
const struct lval_funcs *funcs = whole->location.computed.funcs;
if (funcs->copy_closure)
- component->location.computed.closure = funcs->copy_closure (whole);
+ component->location.computed.closure = funcs->copy_closure (whole);
}
- /* If type has a dynamic resolved location property
- update it's value address. */
+ /* If the WHOLE value has a dynamically resolved location property then
+ update the address of the COMPONENT. */
type = value_type (whole);
if (NULL != TYPE_DATA_LOCATION (type)
&& TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
+
+ /* Similarly, if the COMPONENT value has a dynamically resolved location
+ property then update its address. */
+ type = value_type (component);
+ if (NULL != TYPE_DATA_LOCATION (type)
+ && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
+ {
+ /* If the COMPONENT has a dynamic location, and is an
+ lval_internalvar_component, then we change it to a lval_memory.
+
+ Usually a component of an internalvar is created non-lazy, and has
+ its content immediately copied from the parent internalvar.
+ However, for components with a dynamic location, the content of
+ the component is not contained within the parent, but is instead
+ accessed indirectly. Further, the component will be created as a
+ lazy value.
+
+ By changing the type of the component to lval_memory we ensure
+ that value_fetch_lazy can successfully load the component.
+
+ This solution isn't ideal, but a real fix would require values to
+ carry around both the parent value contents, and the contents of
+ any dynamic fields within the parent. This is a substantial
+ change to how values work in GDB. */
+ if (VALUE_LVAL (component) == lval_internalvar_component)
+ {
+ gdb_assert (value_lazy (component));
+ VALUE_LVAL (component) = lval_memory;
+ }
+ else
+ gdb_assert (VALUE_LVAL (component) == lval_memory);
+ set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
+ }
}
/* Access to the value history. */
int
record_latest_value (struct value *val)
{
- int i;
-
/* We don't want this value to have anything to do with the inferior anymore.
In particular, "set $1 = 50" should not affect the variable from which
the value was taken, and fast watchpoints should be able to assume that
but the current contents of that location. c'est la vie... */
val->modifiable = 0;
- /* The value may have already been released, in which case we're adding a
- new reference for its entry in the history. That is why we call
- release_value_or_incref here instead of release_value. */
- release_value_or_incref (val);
-
- /* Here we treat value_history_count as origin-zero
- and applying to the value being stored now. */
-
- i = value_history_count % VALUE_HISTORY_CHUNK;
- if (i == 0)
- {
- struct value_history_chunk *newobj = XCNEW (struct value_history_chunk);
-
- newobj->next = value_history_chain;
- value_history_chain = newobj;
- }
-
- value_history_chain->values[i] = val;
-
- /* Now we regard value_history_count as origin-one
- and applying to the value just stored. */
+ value_history.push_back (release_value (val));
- return ++value_history_count;
+ return value_history.size ();
}
/* Return a copy of the value in the history with sequence number NUM. */
struct value *
access_value_history (int num)
{
- struct value_history_chunk *chunk;
- int i;
int absnum = num;
if (absnum <= 0)
- absnum += value_history_count;
+ absnum += value_history.size ();
if (absnum <= 0)
{
else
error (_("History does not go back to $$%d."), -num);
}
- if (absnum > value_history_count)
+ if (absnum > value_history.size ())
error (_("History has not yet reached $%d."), absnum);
absnum--;
- /* Now absnum is always absolute and origin zero. */
-
- chunk = value_history_chain;
- for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
- - absnum / VALUE_HISTORY_CHUNK;
- i > 0; i--)
- chunk = chunk->next;
-
- return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
+ return value_copy (value_history[absnum].get ());
}
static void
if (num_exp)
{
/* "show values +" should print from the stored position.
- "show values <exp>" should print around value number <exp>. */
+ "show values <exp>" should print around value number <exp>. */
if (num_exp[0] != '+' || num_exp[1] != '\0')
num = parse_and_eval_long (num_exp) - 5;
}
else
{
/* "show values" means print the last 10 values. */
- num = value_history_count - 9;
+ num = value_history.size () - 9;
}
if (num <= 0)
num = 1;
- for (i = num; i < num + 10 && i <= value_history_count; i++)
+ for (i = num; i < num + 10 && i <= value_history.size (); i++)
{
struct value_print_options opts;
static void
init_if_undefined_command (const char* args, int from_tty)
{
- struct internalvar* intvar;
+ struct internalvar *intvar = nullptr;
/* Parse the expression - this is taken from set_command(). */
expression_up expr = parse_expression (args);
/* Validate the expression.
Was the expression an assignment?
Or even an expression at all? */
- if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
+ if (expr->first_opcode () != BINOP_ASSIGN)
error (_("Init-if-undefined requires an assignment expression."));
- /* Extract the variable from the parsed expression.
- In the case of an assign the lvalue will be in elts[1] and elts[2]. */
- if (expr->elts[1].opcode != OP_INTERNALVAR)
+ /* Extract the variable from the parsed expression. */
+ expr::assign_operation *assign
+ = dynamic_cast<expr::assign_operation *> (expr->op.get ());
+ if (assign != nullptr)
+ {
+ expr::operation *lhs = assign->get_lhs ();
+ expr::internalvar_operation *ivarop
+ = dynamic_cast<expr::internalvar_operation *> (lhs);
+ if (ivarop != nullptr)
+ intvar = ivarop->get_internalvar ();
+ }
+
+ if (intvar == nullptr)
error (_("The first parameter to init-if-undefined "
"should be a GDB variable."));
- intvar = expr->elts[2].internalvar;
/* Only evaluate the expression if the lvalue is void.
- This may still fail if the expresssion is invalid. */
+ This may still fail if the expression is invalid. */
if (intvar->kind == INTERNALVAR_VOID)
evaluate_expression (expr.get ());
}
for (var = internalvars; var; var = var->next)
if (strncmp (var->name, name, len) == 0)
- {
- gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
-
- tracker.add_completion (std::move (copy));
- }
+ tracker.add_completion (make_unique_xstrdup (var->name));
}
/* Create an internal variable with name NAME and with a void value.
{
struct internalvar *var = XNEW (struct internalvar);
- var->name = concat (name, (char *)NULL);
+ var->name = xstrdup (name);
var->kind = INTERNALVAR_VOID;
var->next = internalvars;
internalvars = var;
on this value go back to affect the original internal variable.
Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
- no underlying modifyable state in the internal variable.
+ no underlying modifiable state in the internal variable.
Likewise, if the variable's value is a computed lvalue, we want
references to it to produce another computed lvalue, where
{
struct type *type = check_typedef (value_type (var->u.value));
- if (TYPE_CODE (type) == TYPE_CODE_INT)
+ if (type->code () == TYPE_CODE_INT)
{
*result = value_as_long (var->u.value);
return 1;
error (_("Cannot overwrite convenience function %s"), var->name);
/* Prepare new contents. */
- switch (TYPE_CODE (check_typedef (value_type (val))))
+ switch (check_typedef (value_type (val))->code ())
{
case TYPE_CODE_VOID:
new_kind = INTERNALVAR_VOID;
default:
new_kind = INTERNALVAR_VALUE;
- new_data.value = value_copy (val);
- new_data.value->modifiable = 1;
+ struct value *copy = value_copy (val);
+ copy->modifiable = 1;
/* Force the value to be fetched from the target now, to avoid problems
later when this internalvar is referenced and the target is gone or
has changed. */
- if (value_lazy (new_data.value))
- value_fetch_lazy (new_data.value);
+ if (value_lazy (copy))
+ value_fetch_lazy (copy);
/* Release the value from the value chain to prevent it from being
deleted by free_all_values. From here on this function should not
call error () until new_data is installed into the var->u to avoid
leaking memory. */
- release_value (new_data.value);
+ new_data.value = release_value (copy).release ();
/* Internal variables which are created from values with a dynamic
- location don't need the location property of the origin anymore.
- The resolved dynamic location is used prior then any other address
- when accessing the value.
- If we keep it, we would still refer to the origin value.
- Remove the location property in case it exist. */
- remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
+ location don't need the location property of the origin anymore.
+ The resolved dynamic location is used prior to any other address
+ when accessing the value.
+ If we keep it, we would still refer to the origin value.
+ Remove the location property in case it exists. */
+ value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
break;
}
switch (var->kind)
{
case INTERNALVAR_VALUE:
- value_free (var->u.value);
+ value_decref (var->u.value);
break;
case INTERNALVAR_STRING:
var->kind = INTERNALVAR_VOID;
}
-char *
+const char *
internalvar_name (const struct internalvar *var)
{
return var->name;
return ifn;
}
-char *
+const char *
value_internal_function_name (struct value *val)
{
struct internal_function *ifn;
/* Do nothing. */
}
-/* Clean up if an internal function's command is destroyed. */
-static void
-function_destroyer (struct cmd_list_element *self, void *ignore)
+/* Helper function that does the work for add_internal_function. */
+
+static struct cmd_list_element *
+do_add_internal_function (const char *name, const char *doc,
+ internal_function_fn handler, void *cookie)
{
- xfree ((char *) self->name);
- xfree ((char *) self->doc);
+ struct internal_function *ifn;
+ struct internalvar *var = lookup_internalvar (name);
+
+ ifn = create_internal_function (name, handler, cookie);
+ set_internalvar_function (var, ifn);
+
+ return add_cmd (name, no_class, function_command, doc, &functionlist);
}
-/* Add a new internal function. NAME is the name of the function; DOC
- is a documentation string describing the function. HANDLER is
- called when the function is invoked. COOKIE is an arbitrary
- pointer which is passed to HANDLER and is intended for "user
- data". */
+/* See value.h. */
+
void
add_internal_function (const char *name, const char *doc,
internal_function_fn handler, void *cookie)
{
- struct cmd_list_element *cmd;
- struct internal_function *ifn;
- struct internalvar *var = lookup_internalvar (name);
+ do_add_internal_function (name, doc, handler, cookie);
+}
- ifn = create_internal_function (name, handler, cookie);
- set_internalvar_function (var, ifn);
+/* See value.h. */
- cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
- &functionlist);
- cmd->destroyer = function_destroyer;
+void
+add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
+ gdb::unique_xmalloc_ptr<char> &&doc,
+ internal_function_fn handler, void *cookie)
+{
+ struct cmd_list_element *cmd
+ = do_add_internal_function (name.get (), doc.get (), handler, cookie);
+ doc.release ();
+ cmd->doc_allocated = 1;
+ name.release ();
+ cmd->name_allocated = 1;
}
/* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
preserve_one_value (struct value *value, struct objfile *objfile,
htab_t copied_types)
{
- if (TYPE_OBJFILE (value->type) == objfile)
+ if (value->type->objfile_owner () == objfile)
value->type = copy_type_recursive (objfile, value->type, copied_types);
- if (TYPE_OBJFILE (value->enclosing_type) == objfile)
+ if (value->enclosing_type->objfile_owner () == objfile)
value->enclosing_type = copy_type_recursive (objfile,
value->enclosing_type,
copied_types);
switch (var->kind)
{
case INTERNALVAR_INTEGER:
- if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
+ if (var->u.integer.type
+ && var->u.integer.type->objfile_owner () == objfile)
var->u.integer.type
= copy_type_recursive (objfile, var->u.integer.type, copied_types);
break;
void
preserve_values (struct objfile *objfile)
{
- htab_t copied_types;
- struct value_history_chunk *cur;
struct internalvar *var;
- int i;
/* Create the hash table. We allocate on the objfile's obstack, since
it is soon to be deleted. */
- copied_types = create_copied_types_hash (objfile);
+ htab_up copied_types = create_copied_types_hash (objfile);
- for (cur = value_history_chain; cur; cur = cur->next)
- for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
- if (cur->values[i])
- preserve_one_value (cur->values[i], objfile, copied_types);
+ for (const value_ref_ptr &item : value_history)
+ preserve_one_value (item.get (), objfile, copied_types.get ());
for (var = internalvars; var; var = var->next)
- preserve_one_internalvar (var, objfile, copied_types);
-
- preserve_ext_lang_values (objfile, copied_types);
+ preserve_one_internalvar (var, objfile, copied_types.get ());
- htab_delete (copied_types);
+ preserve_ext_lang_values (objfile, copied_types.get ());
}
static void
}
printf_filtered (("$%s = "), var->name);
- TRY
+ try
{
struct value *val;
val = value_of_internalvar (gdbarch, var);
value_print (val, gdb_stdout, &opts);
}
- CATCH (ex, RETURN_MASK_ERROR)
+ catch (const gdb_exception_error &ex)
{
- fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
+ fprintf_styled (gdb_stdout, metadata_style.style (),
+ _("<error: %s>"), ex.what ());
}
- END_CATCH
printf_filtered (("\n"));
}
}
}
\f
-/* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
+
+/* See value.h. */
struct value *
-value_of_xmethod (struct xmethod_worker *worker)
+value_from_xmethod (xmethod_worker_up &&worker)
{
- if (worker->value == NULL)
- {
- struct value *v;
+ struct value *v;
- v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
- v->lval = lval_xcallable;
- v->location.xm_worker = worker;
- v->modifiable = 0;
- worker->value = v;
- }
+ v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
+ v->lval = lval_xcallable;
+ v->location.xm_worker = worker.release ();
+ v->modifiable = 0;
- return worker->value;
+ return v;
}
/* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
struct type *
-result_type_of_xmethod (struct value *method, int argc, struct value **argv)
+result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
{
- gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
- && method->lval == lval_xcallable && argc > 0);
+ gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
+ && method->lval == lval_xcallable && !argv.empty ());
- return get_xmethod_result_type (method->location.xm_worker,
- argv[0], argv + 1, argc - 1);
+ return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
}
/* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
struct value *
-call_xmethod (struct value *method, int argc, struct value **argv)
+call_xmethod (struct value *method, gdb::array_view<value *> argv)
{
- gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
- && method->lval == lval_xcallable && argc > 0);
+ gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
+ && method->lval == lval_xcallable && !argv.empty ());
- return invoke_xmethod (method->location.xm_worker,
- argv[0], argv + 1, argc - 1);
+ return method->location.xm_worker->invoke (argv[0], argv.slice (1));
}
\f
/* Extract a value as a C number (either long or double).
CORE_ADDR
value_as_address (struct value *val)
{
- struct gdbarch *gdbarch = get_type_arch (value_type (val));
+ struct gdbarch *gdbarch = value_type (val)->arch ();
/* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
whether we want this to be true eventually. */
The following shortcut avoids this whole mess. If VAL is a
function, just return its address directly. */
- if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
- || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
+ if (value_type (val)->code () == TYPE_CODE_FUNC
+ || value_type (val)->code () == TYPE_CODE_METHOD)
return value_address (val);
val = coerce_array (val);
converted to pointers; usually, the ABI doesn't either, but
ABI-specific code is a more reasonable place to handle it. */
- if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
+ if (value_type (val)->code () != TYPE_CODE_PTR
&& !TYPE_IS_REFERENCE (value_type (val))
&& gdbarch_integer_to_address_p (gdbarch))
return gdbarch_integer_to_address (gdbarch, value_type (val),
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
- enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
- enum type_code code = TYPE_CODE (type);
+ if (is_fixed_point_type (type))
+ type = type->fixed_point_type_base_type ();
+
+ enum bfd_endian byte_order = type_byte_order (type);
+ enum type_code code = type->code ();
int len = TYPE_LENGTH (type);
- int nosign = TYPE_UNSIGNED (type);
+ int nosign = type->is_unsigned ();
switch (code)
{
case TYPE_CODE_CHAR:
case TYPE_CODE_RANGE:
case TYPE_CODE_MEMBERPTR:
- if (nosign)
- return extract_unsigned_integer (valaddr, len, byte_order);
- else
- return extract_signed_integer (valaddr, len, byte_order);
+ {
+ LONGEST result;
+
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ if (bit_size == 0)
+ {
+ /* unpack_bits_as_long doesn't handle this case the
+ way we'd like, so handle it here. */
+ result = 0;
+ }
+ else
+ result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
+ }
+ else
+ {
+ if (nosign)
+ result = extract_unsigned_integer (valaddr, len, byte_order);
+ else
+ result = extract_signed_integer (valaddr, len, byte_order);
+ }
+ if (code == TYPE_CODE_RANGE)
+ result += type->bounds ()->bias;
+ return result;
+ }
case TYPE_CODE_FLT:
case TYPE_CODE_DECFLOAT:
return target_float_to_longest (valaddr, type);
+ case TYPE_CODE_FIXED_POINT:
+ {
+ gdb_mpq vq;
+ vq.read_fixed_point (gdb::make_array_view (valaddr, len),
+ byte_order, nosign,
+ type->fixed_point_scaling_factor ());
+
+ gdb_mpz vz;
+ mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
+ return vz.as_integer<LONGEST> ();
+ }
+
case TYPE_CODE_PTR:
case TYPE_CODE_REF:
case TYPE_CODE_RVALUE_REF:
/* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
- whether we want this to be true eventually. */
+ whether we want this to be true eventually. */
return extract_typed_address (valaddr, type);
default:
error (_("Value can't be converted to integer."));
}
- return 0; /* Placate lint. */
}
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
switch (TYPE_FIELD_LOC_KIND (type, fieldno))
{
case FIELD_LOC_KIND_PHYSADDR:
- retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
+ retval = value_at_lazy (type->field (fieldno).type (),
TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
break;
case FIELD_LOC_KIND_PHYSNAME:
reported as non-debuggable symbols. */
struct bound_minimal_symbol msym
= lookup_minimal_symbol (phys_name, NULL, NULL);
+ struct type *field_type = type->field (fieldno).type ();
if (!msym.minsym)
- return allocate_optimized_out_value (type);
+ retval = allocate_optimized_out_value (field_type);
else
- {
- retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
- BMSYMBOL_VALUE_ADDRESS (msym));
- }
+ retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
}
else
retval = value_of_variable (sym.symbol, sym.block);
{
check_type_length_before_alloc (new_encl_type);
val->contents
- = (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
+ .reset ((gdb_byte *) xrealloc (val->contents.release (),
+ TYPE_LENGTH (new_encl_type)));
}
val->enclosing_type = new_encl_type;
int unit_size = gdbarch_addressable_memory_unit_size (arch);
arg_type = check_typedef (arg_type);
- type = TYPE_FIELD_TYPE (arg_type, fieldno);
+ type = arg_type->field (fieldno).type ();
/* Call check_typedef on our type to make sure that, if TYPE
is a TYPE_CODE_TYPEDEF, its length is set to the length
/* We expect an already resolved data location. */
gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
/* For dynamic data types defer memory allocation
- until we actual access the value. */
+ until we actually access the value. */
v = allocate_value_lazy (type);
}
else
{
/* Plain old data member */
offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
- / (HOST_CHAR_BIT * unit_size));
+ / (HOST_CHAR_BIT * unit_size));
/* Lazy register values with offsets are not supported. */
if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
VALUE_LVAL (v) = lval_memory;
if (sym)
{
- set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
+ set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
}
else
{
/* The minimal symbol might point to a function descriptor;
resolve it to the actual code address instead. */
struct objfile *objfile = msym.objfile;
- struct gdbarch *gdbarch = get_objfile_arch (objfile);
+ struct gdbarch *gdbarch = objfile->arch ();
set_value_address (v,
gdbarch_convert_from_func_ptr_addr
- (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), ¤t_target));
+ (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym),
+ current_inferior ()->top_target ()));
}
if (arg1p)
value_addr (*arg1p)));
/* Move the `this' pointer according to the offset.
- VALUE_OFFSET (*arg1p) += offset; */
+ VALUE_OFFSET (*arg1p) += offset; */
}
return v;
\f
-/* Unpack a bitfield of the specified FIELD_TYPE, from the object at
- VALADDR, and store the result in *RESULT.
- The bitfield starts at BITPOS bits and contains BITSIZE bits.
-
- Extracting bits depends on endianness of the machine. Compute the
- number of least significant bits to discard. For big endian machines,
- we compute the total number of bits in the anonymous object, subtract
- off the bit count from the MSB of the object to the MSB of the
- bitfield, then the size of the bitfield, which leaves the LSB discard
- count. For little endian machines, the discard count is simply the
- number of bits from the LSB of the anonymous object to the LSB of the
- bitfield.
-
- If the field is signed, we also do sign extension. */
+/* See value.h. */
-static LONGEST
+LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
LONGEST bitpos, LONGEST bitsize)
{
- enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
+ enum bfd_endian byte_order = type_byte_order (field_type);
ULONGEST val;
ULONGEST valmask;
int lsbcount;
if (bitsize)
bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
else
- bytes_read = TYPE_LENGTH (field_type);
+ {
+ bytes_read = TYPE_LENGTH (field_type);
+ bitsize = 8 * bytes_read;
+ }
read_offset = bitpos / 8;
/* Extract bits. See comment above. */
- if (gdbarch_bits_big_endian (get_type_arch (field_type)))
+ if (byte_order == BFD_ENDIAN_BIG)
lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
else
lsbcount = (bitpos % 8);
/* If the field does not entirely fill a LONGEST, then zero the sign bits.
If the field is signed, and is negative, then sign extend. */
- if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
+ if (bitsize < 8 * (int) sizeof (val))
{
valmask = (((ULONGEST) 1) << bitsize) - 1;
val &= valmask;
- if (!TYPE_UNSIGNED (field_type))
+ if (!field_type->is_unsigned ())
{
if (val & (valmask ^ (valmask >> 1)))
{
{
int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+ struct type *field_type = type->field (fieldno).type ();
int bit_offset;
gdb_assert (val != NULL);
{
int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+ struct type *field_type = type->field (fieldno).type ();
return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
}
int dst_bit_offset;
struct type *field_type = value_type (dest_val);
- byte_order = gdbarch_byte_order (get_type_arch (field_type));
+ byte_order = type_byte_order (field_type);
/* First, unpack and sign extend the bitfield as if it was wholly
valid. Optimized out/unavailable bits are read as zero, but
{
int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
+ struct value *res_val = allocate_value (type->field (fieldno).type ());
unpack_value_bitfield (res_val, bitpos, bitsize,
valaddr, embedded_offset, val);
modify_field (struct type *type, gdb_byte *addr,
LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
- enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
+ enum bfd_endian byte_order = type_byte_order (type);
ULONGEST oword;
ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
LONGEST bytesize;
if (0 != (fieldval & ~mask))
{
/* FIXME: would like to include fieldval in the message, but
- we don't have a sprintf_longest. */
+ we don't have a sprintf_longest. */
warning (_("Value does not fit in %s bits."), plongest (bitsize));
/* Truncate it, otherwise adjoining fields may be corrupted. */
oword = extract_unsigned_integer (addr, bytesize, byte_order);
/* Shifting for bit field depends on endianness of the target machine. */
- if (gdbarch_bits_big_endian (get_type_arch (type)))
+ if (byte_order == BFD_ENDIAN_BIG)
bitpos = bytesize * 8 - bitpos - bitsize;
oword &= ~(mask << bitpos);
void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
- enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
+ enum bfd_endian byte_order = type_byte_order (type);
LONGEST len;
type = check_typedef (type);
len = TYPE_LENGTH (type);
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
+ case TYPE_CODE_RANGE:
+ num -= type->bounds ()->bias;
+ /* Fall through. */
case TYPE_CODE_INT:
case TYPE_CODE_CHAR:
case TYPE_CODE_ENUM:
case TYPE_CODE_FLAGS:
case TYPE_CODE_BOOL:
- case TYPE_CODE_RANGE:
case TYPE_CODE_MEMBERPTR:
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ num &= ((ULONGEST) 1 << bit_size) - 1;
+ num <<= bit_off;
+ }
store_signed_integer (buf, len, byte_order, num);
break;
default:
error (_("Unexpected type (%d) encountered for integer constant."),
- TYPE_CODE (type));
+ type->code ());
}
}
type = check_typedef (type);
len = TYPE_LENGTH (type);
- byte_order = gdbarch_byte_order (get_type_arch (type));
+ byte_order = type_byte_order (type);
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
case TYPE_CODE_INT:
case TYPE_CODE_CHAR:
case TYPE_CODE_BOOL:
case TYPE_CODE_RANGE:
case TYPE_CODE_MEMBERPTR:
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ num &= ((ULONGEST) 1 << bit_size) - 1;
+ num <<= bit_off;
+ }
store_unsigned_integer (buf, len, byte_order, num);
break;
default:
error (_("Unexpected type (%d) encountered "
"for unsigned integer constant."),
- TYPE_CODE (type));
+ type->code ());
}
}
return val;
}
+/* Create and return a value object of TYPE containing the value D. The
+ TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
+ it is converted to target format. */
+
+struct value *
+value_from_host_double (struct type *type, double d)
+{
+ struct value *value = allocate_value (type);
+ gdb_assert (type->code () == TYPE_CODE_FLT);
+ target_float_from_host_double (value_contents_raw (value),
+ value_type (value), d);
+ return value;
+}
/* Create a value of type TYPE whose contents come from VALADDR, if it
is non-null, and whose memory address (in the inferior) is
const gdb_byte *valaddr,
CORE_ADDR address)
{
- struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
+ gdb::array_view<const gdb_byte> view;
+ if (valaddr != nullptr)
+ view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
+ struct type *resolved_type = resolve_dynamic_type (type, view, address);
struct type *resolved_type_no_typedef = check_typedef (resolved_type);
struct value *v;
struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
const struct type *original_type,
- const struct value *original_value)
+ struct value *original_value,
+ CORE_ADDR original_value_address)
{
+ gdb_assert (original_type->code () == TYPE_CODE_PTR
+ || TYPE_IS_REFERENCE (original_type));
+
+ struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
+ gdb::array_view<const gdb_byte> view;
+ struct type *resolved_original_target_type
+ = resolve_dynamic_type (original_target_type, view,
+ original_value_address);
+
/* Re-adjust type. */
- deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
+ deprecated_set_value_type (value, resolved_original_target_type);
/* Add embedding info. */
set_value_enclosing_type (value, enc_type);
enc_type = check_typedef (value_enclosing_type (arg));
enc_type = TYPE_TARGET_TYPE (enc_type);
- retval = value_at_lazy (enc_type,
- unpack_pointer (value_type (arg),
- value_contents (arg)));
+ CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg));
+ retval = value_at_lazy (enc_type, addr);
enc_type = value_type (retval);
- return readjust_indirect_value_type (retval, enc_type,
- value_type_arg_tmp, arg);
+ return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
+ arg, addr);
}
struct value *
arg = coerce_ref (arg);
type = check_typedef (value_type (arg));
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
case TYPE_CODE_ARRAY:
- if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
+ if (!type->is_vector () && current_language->c_style_arrays_p ())
arg = value_coerce_array (arg);
break;
case TYPE_CODE_FUNC:
struct_return_convention (struct gdbarch *gdbarch,
struct value *function, struct type *value_type)
{
- enum type_code code = TYPE_CODE (value_type);
+ enum type_code code = value_type->code ();
if (code == TYPE_CODE_ERROR)
error (_("Function return type unknown."));
using_struct_return (struct gdbarch *gdbarch,
struct value *function, struct type *value_type)
{
- if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
+ if (value_type->code () == TYPE_CODE_VOID)
/* A void return value is never in memory. See also corresponding
code in "print_return_value". */
return 0;
return val->initialized;
}
+/* Helper for value_fetch_lazy when the value is a bitfield. */
+
+static void
+value_fetch_lazy_bitfield (struct value *val)
+{
+ gdb_assert (value_bitsize (val) != 0);
+
+ /* To read a lazy bitfield, read the entire enclosing value. This
+ prevents reading the same block of (possibly volatile) memory once
+ per bitfield. It would be even better to read only the containing
+ word, but we have no way to record that just specific bits of a
+ value have been fetched. */
+ struct value *parent = value_parent (val);
+
+ if (value_lazy (parent))
+ value_fetch_lazy (parent);
+
+ unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
+ value_contents_for_printing (parent),
+ value_offset (val), parent);
+}
+
+/* Helper for value_fetch_lazy when the value is in memory. */
+
+static void
+value_fetch_lazy_memory (struct value *val)
+{
+ gdb_assert (VALUE_LVAL (val) == lval_memory);
+
+ CORE_ADDR addr = value_address (val);
+ struct type *type = check_typedef (value_enclosing_type (val));
+
+ if (TYPE_LENGTH (type))
+ read_value_memory (val, 0, value_stack (val),
+ addr, value_contents_all_raw (val),
+ type_length_units (type));
+}
+
+/* Helper for value_fetch_lazy when the value is in a register. */
+
+static void
+value_fetch_lazy_register (struct value *val)
+{
+ struct frame_info *next_frame;
+ int regnum;
+ struct type *type = check_typedef (value_type (val));
+ struct value *new_val = val, *mark = value_mark ();
+
+ /* Offsets are not supported here; lazy register values must
+ refer to the entire register. */
+ gdb_assert (value_offset (val) == 0);
+
+ while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
+ {
+ struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
+
+ next_frame = frame_find_by_id (next_frame_id);
+ regnum = VALUE_REGNUM (new_val);
+
+ gdb_assert (next_frame != NULL);
+
+ /* Convertible register routines are used for multi-register
+ values and for interpretation in different types
+ (e.g. float or int from a double register). Lazy
+ register values should have the register's natural type,
+ so they do not apply. */
+ gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
+ regnum, type));
+
+ /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
+ Since a "->next" operation was performed when setting
+ this field, we do not need to perform a "next" operation
+ again when unwinding the register. That's why
+ frame_unwind_register_value() is called here instead of
+ get_frame_register_value(). */
+ new_val = frame_unwind_register_value (next_frame, regnum);
+
+ /* If we get another lazy lval_register value, it means the
+ register is found by reading it from NEXT_FRAME's next frame.
+ frame_unwind_register_value should never return a value with
+ the frame id pointing to NEXT_FRAME. If it does, it means we
+ either have two consecutive frames with the same frame id
+ in the frame chain, or some code is trying to unwind
+ behind get_prev_frame's back (e.g., a frame unwind
+ sniffer trying to unwind), bypassing its validations. In
+ any case, it should always be an internal error to end up
+ in this situation. */
+ if (VALUE_LVAL (new_val) == lval_register
+ && value_lazy (new_val)
+ && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
+ internal_error (__FILE__, __LINE__,
+ _("infinite loop while fetching a register"));
+ }
+
+ /* If it's still lazy (for instance, a saved register on the
+ stack), fetch it. */
+ if (value_lazy (new_val))
+ value_fetch_lazy (new_val);
+
+ /* Copy the contents and the unavailability/optimized-out
+ meta-data from NEW_VAL to VAL. */
+ set_value_lazy (val, 0);
+ value_contents_copy (val, value_embedded_offset (val),
+ new_val, value_embedded_offset (new_val),
+ type_length_units (type));
+
+ if (frame_debug)
+ {
+ struct gdbarch *gdbarch;
+ struct frame_info *frame;
+ /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
+ so that the frame level will be shown correctly. */
+ frame = frame_find_by_id (VALUE_FRAME_ID (val));
+ regnum = VALUE_REGNUM (val);
+ gdbarch = get_frame_arch (frame);
+
+ fprintf_unfiltered (gdb_stdlog,
+ "{ value_fetch_lazy "
+ "(frame=%d,regnum=%d(%s),...) ",
+ frame_relative_level (frame), regnum,
+ user_reg_map_regnum_to_name (gdbarch, regnum));
+
+ fprintf_unfiltered (gdb_stdlog, "->");
+ if (value_optimized_out (new_val))
+ {
+ fprintf_unfiltered (gdb_stdlog, " ");
+ val_print_optimized_out (new_val, gdb_stdlog);
+ }
+ else
+ {
+ int i;
+ const gdb_byte *buf = value_contents (new_val);
+
+ if (VALUE_LVAL (new_val) == lval_register)
+ fprintf_unfiltered (gdb_stdlog, " register=%d",
+ VALUE_REGNUM (new_val));
+ else if (VALUE_LVAL (new_val) == lval_memory)
+ fprintf_unfiltered (gdb_stdlog, " address=%s",
+ paddress (gdbarch,
+ value_address (new_val)));
+ else
+ fprintf_unfiltered (gdb_stdlog, " computed");
+
+ fprintf_unfiltered (gdb_stdlog, " bytes=");
+ fprintf_unfiltered (gdb_stdlog, "[");
+ for (i = 0; i < register_size (gdbarch, regnum); i++)
+ fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
+ fprintf_unfiltered (gdb_stdlog, "]");
+ }
+
+ fprintf_unfiltered (gdb_stdlog, " }\n");
+ }
+
+ /* Dispose of the intermediate values. This prevents
+ watchpoints from trying to watch the saved frame pointer. */
+ value_free_to_mark (mark);
+}
+
/* Load the actual content of a lazy value. Fetch the data from the
user's process and clear the lazy flag to indicate that the data in
the buffer is valid.
/* A value is either lazy, or fully fetched. The
availability/validity is only established as we try to fetch a
value. */
- gdb_assert (VEC_empty (range_s, val->optimized_out));
- gdb_assert (VEC_empty (range_s, val->unavailable));
+ gdb_assert (val->optimized_out.empty ());
+ gdb_assert (val->unavailable.empty ());
if (value_bitsize (val))
- {
- /* To read a lazy bitfield, read the entire enclosing value. This
- prevents reading the same block of (possibly volatile) memory once
- per bitfield. It would be even better to read only the containing
- word, but we have no way to record that just specific bits of a
- value have been fetched. */
- struct type *type = check_typedef (value_type (val));
- struct value *parent = value_parent (val);
-
- if (value_lazy (parent))
- value_fetch_lazy (parent);
-
- unpack_value_bitfield (val,
- value_bitpos (val), value_bitsize (val),
- value_contents_for_printing (parent),
- value_offset (val), parent);
- }
+ value_fetch_lazy_bitfield (val);
else if (VALUE_LVAL (val) == lval_memory)
- {
- CORE_ADDR addr = value_address (val);
- struct type *type = check_typedef (value_enclosing_type (val));
-
- if (TYPE_LENGTH (type))
- read_value_memory (val, 0, value_stack (val),
- addr, value_contents_all_raw (val),
- type_length_units (type));
- }
+ value_fetch_lazy_memory (val);
else if (VALUE_LVAL (val) == lval_register)
- {
- struct frame_info *next_frame;
- int regnum;
- struct type *type = check_typedef (value_type (val));
- struct value *new_val = val, *mark = value_mark ();
-
- /* Offsets are not supported here; lazy register values must
- refer to the entire register. */
- gdb_assert (value_offset (val) == 0);
-
- while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
- {
- struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
-
- next_frame = frame_find_by_id (next_frame_id);
- regnum = VALUE_REGNUM (new_val);
-
- gdb_assert (next_frame != NULL);
-
- /* Convertible register routines are used for multi-register
- values and for interpretation in different types
- (e.g. float or int from a double register). Lazy
- register values should have the register's natural type,
- so they do not apply. */
- gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
- regnum, type));
-
- /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
- Since a "->next" operation was performed when setting
- this field, we do not need to perform a "next" operation
- again when unwinding the register. That's why
- frame_unwind_register_value() is called here instead of
- get_frame_register_value(). */
- new_val = frame_unwind_register_value (next_frame, regnum);
-
- /* If we get another lazy lval_register value, it means the
- register is found by reading it from NEXT_FRAME's next frame.
- frame_unwind_register_value should never return a value with
- the frame id pointing to NEXT_FRAME. If it does, it means we
- either have two consecutive frames with the same frame id
- in the frame chain, or some code is trying to unwind
- behind get_prev_frame's back (e.g., a frame unwind
- sniffer trying to unwind), bypassing its validations. In
- any case, it should always be an internal error to end up
- in this situation. */
- if (VALUE_LVAL (new_val) == lval_register
- && value_lazy (new_val)
- && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
- internal_error (__FILE__, __LINE__,
- _("infinite loop while fetching a register"));
- }
-
- /* If it's still lazy (for instance, a saved register on the
- stack), fetch it. */
- if (value_lazy (new_val))
- value_fetch_lazy (new_val);
-
- /* Copy the contents and the unavailability/optimized-out
- meta-data from NEW_VAL to VAL. */
- set_value_lazy (val, 0);
- value_contents_copy (val, value_embedded_offset (val),
- new_val, value_embedded_offset (new_val),
- type_length_units (type));
-
- if (frame_debug)
- {
- struct gdbarch *gdbarch;
- struct frame_info *frame;
- /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
- so that the frame level will be shown correctly. */
- frame = frame_find_by_id (VALUE_FRAME_ID (val));
- regnum = VALUE_REGNUM (val);
- gdbarch = get_frame_arch (frame);
-
- fprintf_unfiltered (gdb_stdlog,
- "{ value_fetch_lazy "
- "(frame=%d,regnum=%d(%s),...) ",
- frame_relative_level (frame), regnum,
- user_reg_map_regnum_to_name (gdbarch, regnum));
-
- fprintf_unfiltered (gdb_stdlog, "->");
- if (value_optimized_out (new_val))
- {
- fprintf_unfiltered (gdb_stdlog, " ");
- val_print_optimized_out (new_val, gdb_stdlog);
- }
- else
- {
- int i;
- const gdb_byte *buf = value_contents (new_val);
-
- if (VALUE_LVAL (new_val) == lval_register)
- fprintf_unfiltered (gdb_stdlog, " register=%d",
- VALUE_REGNUM (new_val));
- else if (VALUE_LVAL (new_val) == lval_memory)
- fprintf_unfiltered (gdb_stdlog, " address=%s",
- paddress (gdbarch,
- value_address (new_val)));
- else
- fprintf_unfiltered (gdb_stdlog, " computed");
-
- fprintf_unfiltered (gdb_stdlog, " bytes=");
- fprintf_unfiltered (gdb_stdlog, "[");
- for (i = 0; i < register_size (gdbarch, regnum); i++)
- fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
- fprintf_unfiltered (gdb_stdlog, "]");
- }
-
- fprintf_unfiltered (gdb_stdlog, " }\n");
- }
-
- /* Dispose of the intermediate values. This prevents
- watchpoints from trying to watch the saved frame pointer. */
- value_free_to_mark (mark);
- }
+ value_fetch_lazy_register (val);
else if (VALUE_LVAL (val) == lval_computed
&& value_computed_funcs (val)->read != NULL)
value_computed_funcs (val)->read (val);
if (argc != 1)
error (_("You must provide one argument for $_isvoid."));
- ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
+ ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
}
+/* Implementation of the convenience function $_creal. Extracts the
+ real part from a complex number. */
+
+static struct value *
+creal_internal_fn (struct gdbarch *gdbarch,
+ const struct language_defn *language,
+ void *cookie, int argc, struct value **argv)
+{
+ if (argc != 1)
+ error (_("You must provide one argument for $_creal."));
+
+ value *cval = argv[0];
+ type *ctype = check_typedef (value_type (cval));
+ if (ctype->code () != TYPE_CODE_COMPLEX)
+ error (_("expected a complex number"));
+ return value_real_part (cval);
+}
+
+/* Implementation of the convenience function $_cimag. Extracts the
+ imaginary part from a complex number. */
+
+static struct value *
+cimag_internal_fn (struct gdbarch *gdbarch,
+ const struct language_defn *language,
+ void *cookie, int argc,
+ struct value **argv)
+{
+ if (argc != 1)
+ error (_("You must provide one argument for $_cimag."));
+
+ value *cval = argv[0];
+ type *ctype = check_typedef (value_type (cval));
+ if (ctype->code () != TYPE_CODE_COMPLEX)
+ error (_("expected a complex number"));
+ return value_imaginary_part (cval);
+}
+
+#if GDB_SELF_TEST
+namespace selftests
+{
+
+/* Test the ranges_contain function. */
+
+static void
+test_ranges_contain ()
+{
+ std::vector<range> ranges;
+ range r;
+
+ /* [10, 14] */
+ r.offset = 10;
+ r.length = 5;
+ ranges.push_back (r);
+
+ /* [20, 24] */
+ r.offset = 20;
+ r.length = 5;
+ ranges.push_back (r);
+
+ /* [2, 6] */
+ SELF_CHECK (!ranges_contain (ranges, 2, 5));
+ /* [9, 13] */
+ SELF_CHECK (ranges_contain (ranges, 9, 5));
+ /* [10, 11] */
+ SELF_CHECK (ranges_contain (ranges, 10, 2));
+ /* [10, 14] */
+ SELF_CHECK (ranges_contain (ranges, 10, 5));
+ /* [13, 18] */
+ SELF_CHECK (ranges_contain (ranges, 13, 6));
+ /* [14, 18] */
+ SELF_CHECK (ranges_contain (ranges, 14, 5));
+ /* [15, 18] */
+ SELF_CHECK (!ranges_contain (ranges, 15, 4));
+ /* [16, 19] */
+ SELF_CHECK (!ranges_contain (ranges, 16, 4));
+ /* [16, 21] */
+ SELF_CHECK (ranges_contain (ranges, 16, 6));
+ /* [21, 21] */
+ SELF_CHECK (ranges_contain (ranges, 21, 1));
+ /* [21, 25] */
+ SELF_CHECK (ranges_contain (ranges, 21, 5));
+ /* [26, 28] */
+ SELF_CHECK (!ranges_contain (ranges, 26, 3));
+}
+
+/* Check that RANGES contains the same ranges as EXPECTED. */
+
+static bool
+check_ranges_vector (gdb::array_view<const range> ranges,
+ gdb::array_view<const range> expected)
+{
+ return ranges == expected;
+}
+
+/* Test the insert_into_bit_range_vector function. */
+
+static void
+test_insert_into_bit_range_vector ()
+{
+ std::vector<range> ranges;
+
+ /* [10, 14] */
+ {
+ insert_into_bit_range_vector (&ranges, 10, 5);
+ static const range expected[] = {
+ {10, 5}
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [10, 14] */
+ {
+ insert_into_bit_range_vector (&ranges, 11, 4);
+ static const range expected = {10, 5};
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [10, 14] [20, 24] */
+ {
+ insert_into_bit_range_vector (&ranges, 20, 5);
+ static const range expected[] = {
+ {10, 5},
+ {20, 5},
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [10, 14] [17, 24] */
+ {
+ insert_into_bit_range_vector (&ranges, 17, 5);
+ static const range expected[] = {
+ {10, 5},
+ {17, 8},
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [2, 8] [10, 14] [17, 24] */
+ {
+ insert_into_bit_range_vector (&ranges, 2, 7);
+ static const range expected[] = {
+ {2, 7},
+ {10, 5},
+ {17, 8},
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [2, 14] [17, 24] */
+ {
+ insert_into_bit_range_vector (&ranges, 9, 1);
+ static const range expected[] = {
+ {2, 13},
+ {17, 8},
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+  /* [2, 14] [17, 24] -- unchanged: re-inserting a range already fully
+     covered by an existing range must be a no-op.  */
+ {
+ insert_into_bit_range_vector (&ranges, 9, 1);
+ static const range expected[] = {
+ {2, 13},
+ {17, 8},
+ };
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+
+ /* [2, 33] */
+ {
+ insert_into_bit_range_vector (&ranges, 4, 30);
+ static const range expected = {2, 32};
+ SELF_CHECK (check_ranges_vector (ranges, expected));
+ }
+}
+
+} /* namespace selftests */
+#endif /* GDB_SELF_TEST */
+
+void _initialize_values ();
void
-_initialize_values (void)
+_initialize_values ()
{
- add_cmd ("convenience", no_class, show_convenience, _("\
+ cmd_list_element *show_convenience_cmd
+ = add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
Convenience functions are defined via the Python API."
#endif
), &showlist);
- add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
+ add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
- &functionlist, "function ", 0, &cmdlist);
+ &functionlist, 0, &cmdlist);
add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Return 1 if the expression is void, zero otherwise."),
isvoid_internal_fn, NULL);
+ add_internal_function ("_creal", _("\
+Extract the real part of a complex number.\n\
+Usage: $_creal (expression)\n\
+Return the real part of a complex number, the type depends on the\n\
+type of a complex number."),
+ creal_internal_fn, NULL);
+
+ add_internal_function ("_cimag", _("\
+Extract the imaginary part of a complex number.\n\
+Usage: $_cimag (expression)\n\
+Return the imaginary part of a complex number, the type depends on the\n\
+type of a complex number."),
+ cimag_internal_fn, NULL);
+
add_setshow_zuinteger_unlimited_cmd ("max-value-size",
class_support, &max_value_size, _("\
Set maximum sized value gdb will load from the inferior."), _("\
set_max_value_size,
show_max_value_size,
&setlist, &showlist);
+#if GDB_SELF_TEST
+ selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
+ selftests::register_test ("insert_into_bit_range_vector",
+ selftests::test_insert_into_bit_range_vector);
+#endif
+}
+
+/* See value.h. */
+
+void
+finalize_values ()
+{
+ all_values.clear ();
}