#include "gdbcore.h"
#include "floatformat.h"
#include <algorithm>
+#include "gmp-utils.h"
/* Initialize BADNESS constants. */
&floatformat_ibm_long_double_big,
&floatformat_ibm_long_double_little
};
+const struct floatformat *floatformats_bfloat16[BFD_ENDIAN_UNKNOWN] = {
+ &floatformat_bfloat16_big,
+ &floatformat_bfloat16_little
+};
/* Should opaque types be resolved? */
/* Mark pointers as unsigned. The target converts between pointers
and addresses (CORE_ADDRs) using gdbarch_pointer_to_address and
gdbarch_address_to_pointer. */
- TYPE_UNSIGNED (ntype) = 1;
+ ntype->set_is_unsigned (true);
/* Update the length of all the other variants of this type. */
chain = TYPE_CHAIN (ntype);
struct type *
make_reference_type (struct type *type, struct type **typeptr,
- enum type_code refcode)
+ enum type_code refcode)
{
struct type *ntype; /* New type */
struct type **reftype;
gdb_assert (refcode == TYPE_CODE_REF || refcode == TYPE_CODE_RVALUE_REF);
ntype = (refcode == TYPE_CODE_REF ? TYPE_REFERENCE_TYPE (type)
- : TYPE_RVALUE_REFERENCE_TYPE (type));
+ : TYPE_RVALUE_REFERENCE_TYPE (type));
if (ntype)
{
TYPE_TARGET_TYPE (ntype) = type;
reftype = (refcode == TYPE_CODE_REF ? &TYPE_REFERENCE_TYPE (type)
- : &TYPE_RVALUE_REFERENCE_TYPE (type));
+ : &TYPE_RVALUE_REFERENCE_TYPE (type));
*reftype = ntype;
if (param_types[nparams - 1] == NULL)
{
--nparams;
- TYPE_VARARGS (fn) = 1;
+ fn->set_has_varargs (true);
}
else if (check_typedef (param_types[nparams - 1])->code ()
== TYPE_CODE_VOID)
--nparams;
/* Caller should have ensured this. */
gdb_assert (nparams == 0);
- TYPE_PROTOTYPED (fn) = 1;
+ fn->set_is_prototyped (true);
}
else
- TYPE_PROTOTYPED (fn) = 1;
+ fn->set_is_prototyped (true);
}
fn->set_num_fields (nparams);
return fn;
}
-/* Identify address space identifier by name --
- return the integer flag defined in gdbtypes.h. */
+/* Identify address space identifier by name -- return a
+ type_instance_flags. */
-int
-address_space_name_to_int (struct gdbarch *gdbarch,
- const char *space_identifier)
+type_instance_flags
+address_space_name_to_type_instance_flags (struct gdbarch *gdbarch,
+ const char *space_identifier)
{
- int type_flags;
+ type_instance_flags type_flags;
/* Check for known address space delimiters. */
if (!strcmp (space_identifier, "code"))
else if (!strcmp (space_identifier, "data"))
return TYPE_INSTANCE_FLAG_DATA_SPACE;
else if (gdbarch_address_class_name_to_type_flags_p (gdbarch)
- && gdbarch_address_class_name_to_type_flags (gdbarch,
+ && gdbarch_address_class_name_to_type_flags (gdbarch,
space_identifier,
&type_flags))
return type_flags;
error (_("Unknown address space specifier: \"%s\""), space_identifier);
}
-/* Identify address space identifier by integer flag as defined in
- gdbtypes.h -- return the string version of the adress space name. */
+/* Identify address space identifier by type_instance_flags and return
+   the string version of the address space name. */
const char *
-address_space_int_to_name (struct gdbarch *gdbarch, int space_flag)
+address_space_type_instance_flags_to_name (struct gdbarch *gdbarch,
+ type_instance_flags space_flag)
{
if (space_flag & TYPE_INSTANCE_FLAG_CODE_SPACE)
return "code";
else if (space_flag & TYPE_INSTANCE_FLAG_DATA_SPACE)
return "data";
else if ((space_flag & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL)
- && gdbarch_address_class_type_flags_to_name_p (gdbarch))
+ && gdbarch_address_class_type_flags_to_name_p (gdbarch))
return gdbarch_address_class_type_flags_to_name (gdbarch, space_flag);
else
return NULL;
STORAGE must be in the same obstack as TYPE. */
static struct type *
-make_qualified_type (struct type *type, int new_flags,
+make_qualified_type (struct type *type, type_instance_flags new_flags,
struct type *storage)
{
struct type *ntype;
ntype = type;
do
{
- if (TYPE_INSTANCE_FLAGS (ntype) == new_flags)
+ if (ntype->instance_flags () == new_flags)
return ntype;
ntype = TYPE_CHAIN (ntype);
}
TYPE_CHAIN (type) = ntype;
/* Now set the instance flags and return the new type. */
- TYPE_INSTANCE_FLAGS (ntype) = new_flags;
+ ntype->set_instance_flags (new_flags);
/* Set length of new type to that of the original type. */
TYPE_LENGTH (ntype) = TYPE_LENGTH (type);
representations. */
struct type *
-make_type_with_address_space (struct type *type, int space_flag)
+make_type_with_address_space (struct type *type,
+ type_instance_flags space_flag)
{
- int new_flags = ((TYPE_INSTANCE_FLAGS (type)
- & ~(TYPE_INSTANCE_FLAG_CODE_SPACE
- | TYPE_INSTANCE_FLAG_DATA_SPACE
- | TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL))
- | space_flag);
+ type_instance_flags new_flags = ((type->instance_flags ()
+ & ~(TYPE_INSTANCE_FLAG_CODE_SPACE
+ | TYPE_INSTANCE_FLAG_DATA_SPACE
+ | TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL))
+ | space_flag);
return make_qualified_type (type, new_flags, NULL);
}
{
struct type *ntype; /* New type */
- int new_flags = (TYPE_INSTANCE_FLAGS (type)
- & ~(TYPE_INSTANCE_FLAG_CONST
- | TYPE_INSTANCE_FLAG_VOLATILE));
+ type_instance_flags new_flags = (type->instance_flags ()
+ & ~(TYPE_INSTANCE_FLAG_CONST
+ | TYPE_INSTANCE_FLAG_VOLATILE));
if (cnst)
new_flags |= TYPE_INSTANCE_FLAG_CONST;
make_restrict_type (struct type *type)
{
return make_qualified_type (type,
- (TYPE_INSTANCE_FLAGS (type)
+ (type->instance_flags ()
| TYPE_INSTANCE_FLAG_RESTRICT),
NULL);
}
make_unqualified_type (struct type *type)
{
return make_qualified_type (type,
- (TYPE_INSTANCE_FLAGS (type)
+ (type->instance_flags ()
& ~(TYPE_INSTANCE_FLAG_CONST
| TYPE_INSTANCE_FLAG_VOLATILE
| TYPE_INSTANCE_FLAG_RESTRICT)),
make_atomic_type (struct type *type)
{
return make_qualified_type (type,
- (TYPE_INSTANCE_FLAGS (type)
+ (type->instance_flags ()
| TYPE_INSTANCE_FLAG_ATOMIC),
NULL);
}
/* Assert that the two types have equivalent instance qualifiers.
This should be true for at least all of our debug readers. */
- gdb_assert (TYPE_INSTANCE_FLAGS (ntype) == TYPE_INSTANCE_FLAGS (type));
+ gdb_assert (ntype->instance_flags () == type->instance_flags ());
}
/* Implement direct support for MEMBER_TYPE in GNU C++.
mtype = alloc_type_copy (type);
mtype->set_code (TYPE_CODE_METHOD);
TYPE_LENGTH (mtype) = 1;
- TYPE_STUB (mtype) = 1;
+ mtype->set_is_stub (true);
TYPE_TARGET_TYPE (mtype) = type;
/* TYPE_SELF_TYPE (mtype) = unknown yet */
return mtype;
bool
operator== (const dynamic_prop &l, const dynamic_prop &r)
{
- if (l.kind != r.kind)
+ if (l.kind () != r.kind ())
return false;
- switch (l.kind)
+ switch (l.kind ())
{
case PROP_UNDEFINED:
return true;
case PROP_CONST:
- return l.data.const_val == r.data.const_val;
+ return l.const_val () == r.const_val ();
case PROP_ADDR_OFFSET:
case PROP_LOCEXPR:
case PROP_LOCLIST:
- return l.data.baton == r.data.baton;
+ return l.baton () == r.baton ();
case PROP_VARIANT_PARTS:
- return l.data.variant_parts == r.data.variant_parts;
+ return l.variant_parts () == r.variant_parts ();
case PROP_TYPE:
- return l.data.original_type == r.data.original_type;
+ return l.original_type () == r.original_type ();
}
gdb_assert_not_reached ("unhandled dynamic_prop kind");
result_type = alloc_type_copy (index_type);
result_type->set_code (TYPE_CODE_RANGE);
TYPE_TARGET_TYPE (result_type) = index_type;
- if (TYPE_STUB (index_type))
- TYPE_TARGET_STUB (result_type) = 1;
+ if (index_type->is_stub ())
+ result_type->set_target_is_stub (true);
else
TYPE_LENGTH (result_type) = TYPE_LENGTH (check_typedef (index_type));
bounds->low = *low_bound;
bounds->high = *high_bound;
bounds->bias = bias;
-
- /* Initialize the stride to be a constant, the value will already be zero
- thanks to the use of TYPE_ZALLOC above. */
- bounds->stride.kind = PROP_CONST;
+ bounds->stride.set_const_val (0);
result_type->set_bounds (bounds);
- if (low_bound->kind == PROP_CONST && low_bound->data.const_val >= 0)
- TYPE_UNSIGNED (result_type) = 1;
-
+ if (index_type->code () == TYPE_CODE_FIXED_POINT)
+ result_type->set_is_unsigned (index_type->is_unsigned ());
+ /* Note that the signed-ness of a range type can't simply be copied
+ from the underlying type. Consider a case where the underlying
+ type is 'int', but the range type can hold 0..65535, and where
+ the range is further specified to fit into 16 bits. In this
+ case, if we copy the underlying type's sign, then reading some
+ range values will cause an unwanted sign extension. So, we have
+ some heuristics here instead. */
+ else if (low_bound->kind () == PROP_CONST && low_bound->const_val () >= 0)
+ result_type->set_is_unsigned (true);
/* Ada allows the declaration of range types whose upper bound is
less than the lower bound, so checking the lower bound is not
enough. Make sure we do not mark a range type whose upper bound
is negative as unsigned. */
- if (high_bound->kind == PROP_CONST && high_bound->data.const_val < 0)
- TYPE_UNSIGNED (result_type) = 0;
+ if (high_bound->kind () == PROP_CONST && high_bound->const_val () < 0)
+ result_type->set_is_unsigned (false);
- TYPE_ENDIANITY_NOT_DEFAULT (result_type)
- = TYPE_ENDIANITY_NOT_DEFAULT (index_type);
+ result_type->set_endianity_is_not_default
+ (index_type->endianity_is_not_default ());
return result_type;
}
{
struct dynamic_prop low, high;
- low.kind = PROP_CONST;
- low.data.const_val = low_bound;
-
- high.kind = PROP_CONST;
- high.data.const_val = high_bound;
+ low.set_const_val (low_bound);
+ high.set_const_val (high_bound);
result_type = create_range_type (result_type, index_type, &low, &high, 0);
{
/* If the range doesn't have a defined stride then its stride field will
be initialized to the constant 0. */
- return (bounds->low.kind == PROP_CONST
- && bounds->high.kind == PROP_CONST
- && bounds->stride.kind == PROP_CONST);
+ return (bounds->low.kind () == PROP_CONST
+ && bounds->high.kind () == PROP_CONST
+ && bounds->stride.kind () == PROP_CONST);
}
/* Set *LOWP and *HIGHP to the lower and upper bounds of discrete type
- TYPE. Return 1 if type is a range type, 0 if it is discrete (and
- bounds will fit in LONGEST), or -1 otherwise. */
+ TYPE.
+
+ Return 1 if type is a range type with two defined, constant bounds.
+ Else, return 0 if it is discrete (and bounds will fit in LONGEST).
+ Else, return -1. */
int
get_discrete_bounds (struct type *type, LONGEST *lowp, LONGEST *highp)
switch (type->code ())
{
case TYPE_CODE_RANGE:
- *lowp = TYPE_LOW_BOUND (type);
- *highp = TYPE_HIGH_BOUND (type);
+ /* This function currently only works for ranges with two defined,
+ constant bounds. */
+ if (type->bounds ()->low.kind () != PROP_CONST
+ || type->bounds ()->high.kind () != PROP_CONST)
+ return -1;
+
+ *lowp = type->bounds ()->low.const_val ();
+ *highp = type->bounds ()->high.const_val ();
+
if (TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_ENUM)
{
if (!discrete_position (TYPE_TARGET_TYPE (type), *lowp, lowp)
/* Set unsigned indicator if warranted. */
if (*lowp >= 0)
- {
- TYPE_UNSIGNED (type) = 1;
- }
+ type->set_is_unsigned (true);
}
else
{
case TYPE_CODE_INT:
if (TYPE_LENGTH (type) > sizeof (LONGEST)) /* Too big */
return -1;
- if (!TYPE_UNSIGNED (type))
+ if (!type->is_unsigned ())
{
*lowp = -(1 << (TYPE_LENGTH (type) * TARGET_CHAR_BIT - 1));
*highp = -*lowp - 1;
case TYPE_CODE_CHAR:
*lowp = 0;
/* This round-about calculation is to avoid shifting by
- TYPE_LENGTH (type) * TARGET_CHAR_BIT, which will not work
- if TYPE_LENGTH (type) == sizeof (LONGEST). */
+ TYPE_LENGTH (type) * TARGET_CHAR_BIT, which will not work
+ if TYPE_LENGTH (type) == sizeof (LONGEST). */
*highp = 1 << (TYPE_LENGTH (type) * TARGET_CHAR_BIT - 1);
*highp = (*highp - 1) | *highp;
return 0;
Save the high bound into HIGH_BOUND if not NULL.
Return 1 if the operation was successful. Return zero otherwise,
- in which case the values of LOW_BOUND and HIGH_BOUNDS are unmodified.
-
- We now simply use get_discrete_bounds call to get the values
- of the low and high bounds.
- get_discrete_bounds can return three values:
- 1, meaning that index is a range,
- 0, meaning that index is a discrete type,
- or -1 for failure. */
+   in which case the values of LOW_BOUND and HIGH_BOUND are unmodified. */
int
get_array_bounds (struct type *type, LONGEST *low_bound, LONGEST *high_bound)
if (res == -1)
return 0;
- /* Check if the array bounds are undefined. */
- if (res == 1
- && ((low_bound && TYPE_ARRAY_LOWER_BOUND_IS_UNDEFINED (type))
- || (high_bound && TYPE_ARRAY_UPPER_BOUND_IS_UNDEFINED (type))))
- return 0;
-
if (low_bound)
*low_bound = low;
int i;
for (i = 0; i < type->num_fields (); i += 1)
- {
- if (val == TYPE_FIELD_ENUMVAL (type, i))
+ {
+ if (val == TYPE_FIELD_ENUMVAL (type, i))
{
*pos = i;
return 1;
}
- }
+ }
/* Invalid enumeration value. */
return 0;
}
arrays bit size field. */
stride = TYPE_FIELD_BITSIZE (type, 0);
if (stride == 0)
- stride = TYPE_BIT_STRIDE (range_type);
+ stride = range_type->bit_stride ();
if (get_discrete_bounds (range_type, &low_bound, &high_bound) < 0)
low_bound = high_bound = 0;
TYPE_LENGTH (type) =
TYPE_LENGTH (element_type) * (high_bound - low_bound + 1);
+ /* If this array's element is itself an array with a bit stride,
+ then we want to update this array's bit stride to reflect the
+ size of the sub-array. Otherwise, we'll end up using the
+ wrong size when trying to find elements of the outer
+ array. */
+ if (element_type->code () == TYPE_CODE_ARRAY
+ && TYPE_LENGTH (element_type) != 0
+ && TYPE_FIELD_BITSIZE (element_type, 0) != 0
+ && get_array_bounds (element_type, &low_bound, &high_bound) >= 0
+ && high_bound >= low_bound)
+ TYPE_FIELD_BITSIZE (type, 0)
+ = ((high_bound - low_bound + 1)
+ * TYPE_FIELD_BITSIZE (element_type, 0));
+
return true;
}
unsigned int bit_stride)
{
if (byte_stride_prop != NULL
- && byte_stride_prop->kind == PROP_CONST)
+ && byte_stride_prop->kind () == PROP_CONST)
{
/* The byte stride is actually not dynamic. Pretend we were
called with bit_stride set instead of byte_stride_prop.
This will give us the same result type, while avoiding
the need to handle this as a special case. */
- bit_stride = byte_stride_prop->data.const_val * 8;
+ bit_stride = byte_stride_prop->const_val () * 8;
byte_stride_prop = NULL;
}
if (!update_static_array_size (result_type))
{
/* This type is dynamic and its length needs to be computed
- on demand. In the meantime, avoid leaving the TYPE_LENGTH
- undefined by setting it to zero. Although we are not expected
- to trust TYPE_LENGTH in this case, setting the size to zero
- allows us to avoid allocating objects of random sizes in case
- we accidently do. */
+ on demand. In the meantime, avoid leaving the TYPE_LENGTH
+ undefined by setting it to zero. Although we are not expected
+ to trust TYPE_LENGTH in this case, setting the size to zero
+ allows us to avoid allocating objects of random sizes in case
+	 we accidentally do. */
TYPE_LENGTH (result_type) = 0;
}
/* TYPE_TARGET_STUB will take care of zero length arrays. */
if (TYPE_LENGTH (result_type) == 0)
- TYPE_TARGET_STUB (result_type) = 1;
+ result_type->set_target_is_stub (true);
return result_type;
}
result_type->set_fields
((struct field *) TYPE_ZALLOC (result_type, sizeof (struct field)));
- if (!TYPE_STUB (domain_type))
+ if (!domain_type->is_stub ())
{
LONGEST low_bound, high_bound, bit_length;
TYPE_LENGTH (result_type)
= (bit_length + TARGET_CHAR_BIT - 1) / TARGET_CHAR_BIT;
if (low_bound >= 0)
- TYPE_UNSIGNED (result_type) = 1;
+ result_type->set_is_unsigned (true);
}
result_type->field (0).set_type (domain_type);
make_vector_type (struct type *array_type)
{
struct type *inner_array, *elt_type;
- int flags;
/* Find the innermost array type, in case the array is
multi-dimensional. */
elt_type = TYPE_TARGET_TYPE (inner_array);
if (elt_type->code () == TYPE_CODE_INT)
{
- flags = TYPE_INSTANCE_FLAGS (elt_type) | TYPE_INSTANCE_FLAG_NOTTEXT;
+ type_instance_flags flags
+ = elt_type->instance_flags () | TYPE_INSTANCE_FLAG_NOTTEXT;
elt_type = make_qualified_type (elt_type, flags, NULL);
TYPE_TARGET_TYPE (inner_array) = elt_type;
}
- TYPE_VECTOR (array_type) = 1;
+ array_type->set_is_vector (true);
}
struct type *
type->set_fields (args);
type->set_num_fields (nargs);
if (varargs)
- TYPE_VARARGS (type) = 1;
+ type->set_has_varargs (true);
TYPE_LENGTH (type) = 1; /* In practice, this is never needed. */
}
unsigned int n;
type = check_typedef (type);
- gdb_assert (type->code () == TYPE_CODE_INT && TYPE_UNSIGNED (type));
+ gdb_assert (type->code () == TYPE_CODE_INT && type->is_unsigned ());
gdb_assert (TYPE_LENGTH (type) <= sizeof (ULONGEST));
/* Written this way to avoid overflow. */
unsigned int n;
type = check_typedef (type);
- gdb_assert (type->code () == TYPE_CODE_INT && !TYPE_UNSIGNED (type));
+ gdb_assert (type->code () == TYPE_CODE_INT && !type->is_unsigned ());
gdb_assert (TYPE_LENGTH (type) <= sizeof (LONGEST));
n = TYPE_LENGTH (type) * TARGET_CHAR_BIT;
int i;
/* We must start at zero in case the first (and only) baseclass
- is virtual (and hence we cannot share the table pointer). */
+ is virtual (and hence we cannot share the table pointer). */
for (i = 0; i < TYPE_N_BASECLASSES (type); i++)
{
struct type *baseclass = check_typedef (TYPE_BASECLASS (type, i));
{
struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_BYTE_STRIDE);
- return (prop != NULL && prop->kind != PROP_CONST);
+ return (prop != NULL && prop->kind () != PROP_CONST);
}
/* Worker for is_dynamic_type. */
return 1;
struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_VARIANT_PARTS);
- if (prop != nullptr && prop->kind != PROP_TYPE)
+ if (prop != nullptr && prop->kind () != PROP_TYPE)
return 1;
if (TYPE_HAS_DYNAMIC_LENGTH (type))
const struct dynamic_prop *prop = &dyn_range_type->bounds ()->low;
if (dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
- {
- low_bound.kind = PROP_CONST;
- low_bound.data.const_val = value;
- }
+ low_bound.set_const_val (value);
else
- {
- low_bound.kind = PROP_UNDEFINED;
- low_bound.data.const_val = 0;
- }
+ low_bound.set_undefined ();
prop = &dyn_range_type->bounds ()->high;
if (dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
{
- high_bound.kind = PROP_CONST;
- high_bound.data.const_val = value;
+ high_bound.set_const_val (value);
if (dyn_range_type->bounds ()->flag_upper_bound_is_count)
- high_bound.data.const_val
- = low_bound.data.const_val + high_bound.data.const_val - 1;
+ high_bound.set_const_val
+ (low_bound.const_val () + high_bound.const_val () - 1);
}
else
- {
- high_bound.kind = PROP_UNDEFINED;
- high_bound.data.const_val = 0;
- }
+ high_bound.set_undefined ();
bool byte_stride_p = dyn_range_type->bounds ()->flag_is_byte_stride;
prop = &dyn_range_type->bounds ()->stride;
if (dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
{
- stride.kind = PROP_CONST;
- stride.data.const_val = value;
+ stride.set_const_val (value);
/* If we have a bit stride that is not an exact number of bytes then
I really don't think this is going to work with current GDB, the
}
else
{
- stride.kind = PROP_UNDEFINED;
- stride.data.const_val = 0;
+ stride.set_undefined ();
byte_stride_p = true;
}
will update the length of the array accordingly. */
prop = TYPE_ALLOCATED_PROP (type);
if (prop != NULL && dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
- {
- TYPE_DYN_PROP_ADDR (prop) = value;
- TYPE_DYN_PROP_KIND (prop) = PROP_CONST;
- }
+ prop->set_const_val (value);
+
prop = TYPE_ASSOCIATED_PROP (type);
if (prop != NULL && dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
- {
- TYPE_DYN_PROP_ADDR (prop) = value;
- TYPE_DYN_PROP_KIND (prop) = PROP_CONST;
- }
+ prop->set_const_val (value);
ary_dim = check_typedef (TYPE_TARGET_TYPE (elt_type));
bit_stride = TYPE_FIELD_BITSIZE (type, 0);
return create_array_type_with_stride (type, elt_type, range_type, NULL,
- bit_stride);
+ bit_stride);
}
/* Resolve dynamic bounds of members of the union TYPE to static
resolved_type = copy_type (type);
dynamic_prop *variant_prop = resolved_type->dyn_prop (DYN_PROP_VARIANT_PARTS);
- if (variant_prop != nullptr && variant_prop->kind == PROP_VARIANT_PARTS)
+ if (variant_prop != nullptr && variant_prop->kind () == PROP_VARIANT_PARTS)
{
compute_variant_fields (type, resolved_type, addr_stack,
- *variant_prop->data.variant_parts);
+ *variant_prop->variant_parts ());
/* We want to leave the property attached, so that the Rust code
can tell whether the type was originally an enum. */
- variant_prop->kind = PROP_TYPE;
- variant_prop->data.original_type = type;
+ variant_prop->set_original_type (type);
}
else
{
baton.locexpr = *TYPE_FIELD_DWARF_BLOCK (resolved_type, i);
struct dynamic_prop prop;
- prop.kind = PROP_LOCEXPR;
- prop.data.baton = &baton;
+ prop.set_locexpr (&baton);
CORE_ADDR addr;
if (dwarf2_evaluate_property (&prop, nullptr, addr_stack, &addr,
prop = TYPE_DATA_LOCATION (resolved_type);
if (prop != NULL
&& dwarf2_evaluate_property (prop, NULL, addr_stack, &value))
- {
- TYPE_DYN_PROP_ADDR (prop) = value;
- TYPE_DYN_PROP_KIND (prop) = PROP_CONST;
- }
+ prop->set_const_val (value);
return resolved_type;
}
while (node != NULL)
{
if (node->prop_kind == prop_kind)
- return &node->prop;
+ return &node->prop;
node = node->next;
}
return NULL;
check_typedef (struct type *type)
{
struct type *orig_type = type;
- /* While we're removing typedefs, we don't want to lose qualifiers.
- E.g., const/volatile. */
- int instance_flags = TYPE_INSTANCE_FLAGS (type);
gdb_assert (type);
+ /* While we're removing typedefs, we don't want to lose qualifiers.
+ E.g., const/volatile. */
+ type_instance_flags instance_flags = type->instance_flags ();
+
while (type->code () == TYPE_CODE_TYPEDEF)
{
if (!TYPE_TARGET_TYPE (type))
outer cast in a chain of casting win), instead of assuming
"it can't happen". */
{
- const int ALL_SPACES = (TYPE_INSTANCE_FLAG_CODE_SPACE
- | TYPE_INSTANCE_FLAG_DATA_SPACE);
- const int ALL_CLASSES = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL;
- int new_instance_flags = TYPE_INSTANCE_FLAGS (type);
+ const type_instance_flags ALL_SPACES
+ = (TYPE_INSTANCE_FLAG_CODE_SPACE
+ | TYPE_INSTANCE_FLAG_DATA_SPACE);
+ const type_instance_flags ALL_CLASSES
+ = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL;
+
+ type_instance_flags new_instance_flags = type->instance_flags ();
/* Treat code vs data spaces and address classes separately. */
if ((instance_flags & ALL_SPACES) != 0)
move over any other types NEWTYPE refers to, which could
be an unbounded amount of stuff. */
if (TYPE_OBJFILE (newtype) == TYPE_OBJFILE (type))
- type = make_qualified_type (newtype,
- TYPE_INSTANCE_FLAGS (type),
- type);
+ type = make_qualified_type (newtype, type->instance_flags (), type);
else
type = newtype;
}
}
/* Otherwise, rely on the stub flag being set for opaque/stubbed
types. */
- else if (TYPE_STUB (type) && !currently_reading_symtab)
+ else if (type->is_stub () && !currently_reading_symtab)
{
const char *name = type->name ();
/* FIXME: shouldn't we look in STRUCT_DOMAIN and/or VAR_DOMAIN
- as appropriate? */
+ as appropriate? */
struct symbol *sym;
if (name == NULL)
}
sym = lookup_symbol (name, 0, STRUCT_DOMAIN, 0).symbol;
if (sym)
- {
- /* Same as above for opaque types, we can replace the stub
- with the complete type only if they are in the same
- objfile. */
+ {
+ /* Same as above for opaque types, we can replace the stub
+ with the complete type only if they are in the same
+ objfile. */
if (TYPE_OBJFILE (SYMBOL_TYPE (sym)) == TYPE_OBJFILE (type))
- type = make_qualified_type (SYMBOL_TYPE (sym),
- TYPE_INSTANCE_FLAGS (type),
- type);
+ type = make_qualified_type (SYMBOL_TYPE (sym),
+ type->instance_flags (), type);
else
type = SYMBOL_TYPE (sym);
- }
+ }
}
- if (TYPE_TARGET_STUB (type))
+ if (type->target_is_stub ())
{
struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
- if (TYPE_STUB (target_type) || TYPE_TARGET_STUB (target_type))
+ if (target_type->is_stub () || target_type->target_is_stub ())
{
/* Nothing we can do. */
}
else if (type->code () == TYPE_CODE_RANGE)
{
TYPE_LENGTH (type) = TYPE_LENGTH (target_type);
- TYPE_TARGET_STUB (type) = 0;
+ type->set_target_is_stub (false);
}
else if (type->code () == TYPE_CODE_ARRAY
&& update_static_array_size (type))
- TYPE_TARGET_STUB (type) = 0;
+ type->set_target_is_stub (false);
}
type = make_qualified_type (type, instance_flags, NULL);
if (depth <= 0 && (*p == ',' || *p == ')'))
{
/* Avoid parsing of ellipsis, they will be handled below.
- Also avoid ``void'' as above. */
+ Also avoid ``void'' as above. */
if (strncmp (argtypetext, "...", p - argtypetext) != 0
&& strncmp (argtypetext, "void", p - argtypetext) != 0)
{
We want a method (TYPE_CODE_METHOD). */
smash_to_method_type (mtype, type, TYPE_TARGET_TYPE (mtype),
argtypes, argcount, p[-2] == '.');
- TYPE_STUB (mtype) = 0;
+ mtype->set_is_stub (false);
TYPE_FN_FIELD_STUB (f, signature_id) = 0;
xfree (demangled_name);
case TYPE_CODE_STRUCT:
case TYPE_CODE_UNION:
case TYPE_CODE_NAMESPACE:
- INIT_CPLUS_SPECIFIC (type);
- break;
+ INIT_CPLUS_SPECIFIC (type);
+ break;
case TYPE_CODE_FLT:
- TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_FLOATFORMAT;
- break;
+ TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_FLOATFORMAT;
+ break;
case TYPE_CODE_FUNC:
INIT_FUNC_SPECIFIC (type);
break;
+ case TYPE_CODE_FIXED_POINT:
+ INIT_FIXED_POINT_SPECIFIC (type);
+ break;
}
}
t = init_type (objfile, TYPE_CODE_INT, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
+
+ TYPE_SPECIFIC_FIELD (t) = TYPE_SPECIFIC_INT;
+ TYPE_MAIN_TYPE (t)->type_specific.int_stuff.bit_size = bit;
+ TYPE_MAIN_TYPE (t)->type_specific.int_stuff.bit_offset = 0;
return t;
}
t = init_type (objfile, TYPE_CODE_CHAR, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
return t;
}
t = init_type (objfile, TYPE_CODE_BOOL, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
+
+ TYPE_SPECIFIC_FIELD (t) = TYPE_SPECIFIC_INT;
+ TYPE_MAIN_TYPE (t)->type_specific.int_stuff.bit_size = bit;
+ TYPE_MAIN_TYPE (t)->type_specific.int_stuff.bit_offset = 0;
return t;
}
if (TYPE_MAIN_TYPE (target_type)->flds_bnds.complex_type == nullptr)
{
- if (name == nullptr)
+ if (name == nullptr && target_type->name () != nullptr)
{
char *new_name
= (char *) TYPE_ALLOC (target_type,
t = init_type (objfile, TYPE_CODE_PTR, bit, name);
TYPE_TARGET_TYPE (t) = target_type;
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
+ return t;
+}
+
+/* Allocate a TYPE_CODE_FIXED_POINT type structure associated with OBJFILE.
+   BIT is the type size in bits.
+   UNSIGNED_P should be nonzero if the type is unsigned.
+   NAME is the type name.
+
+   NOTE(review): the fixed-point-specific data (scaling factor) is not
+   set here; it appears to be initialized via INIT_FIXED_POINT_SPECIFIC
+   when the TYPE_CODE_FIXED_POINT code is installed -- confirm.  */
+
+struct type *
+init_fixed_point_type (struct objfile *objfile,
+		       int bit, int unsigned_p, const char *name)
+{
+  struct type *t;
+
+  t = init_type (objfile, TYPE_CODE_FIXED_POINT, bit, name);
+  if (unsigned_p)
+    t->set_is_unsigned (true);
+
+  return t;
+}
t = check_typedef (t);
return
((t != NULL)
+ && !is_fixed_point_type (t)
&& ((t->code () == TYPE_CODE_INT)
|| (t->code () == TYPE_CODE_ENUM)
|| (t->code () == TYPE_CODE_FLAGS)
{
type = check_typedef (type);
+ if (is_fixed_point_type (type))
+ return 0; /* Implemented as a scalar, but more like a floating point. */
+
switch (type->code ())
{
case TYPE_CODE_ARRAY:
class_or_union_p (const struct type *t)
{
return (t->code () == TYPE_CODE_STRUCT
- || t->code () == TYPE_CODE_UNION);
+ || t->code () == TYPE_CODE_UNION);
}
/* A helper function which returns true if types A and B represent the
type_byte_order (const struct type *type)
{
bfd_endian byteorder = gdbarch_byte_order (get_type_arch (type));
- if (TYPE_ENDIANITY_NOT_DEFAULT (type))
+ if (type->endianity_is_not_default ())
{
if (byteorder == BFD_ENDIAN_BIG)
- return BFD_ENDIAN_LITTLE;
+ return BFD_ENDIAN_LITTLE;
else
{
gdb_assert (byteorder == BFD_ENDIAN_LITTLE);
if (a->code () == TYPE_CODE_PTR
|| a->code () == TYPE_CODE_REF)
return types_equal (TYPE_TARGET_TYPE (a),
- TYPE_TARGET_TYPE (b));
+ TYPE_TARGET_TYPE (b));
/* Well, damnit, if the names are exactly the same, I'll say they
are exactly the same. This happens when we generate method
if (type1->code () != type2->code ()
|| TYPE_LENGTH (type1) != TYPE_LENGTH (type2)
- || TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2)
- || TYPE_NOSIGN (type1) != TYPE_NOSIGN (type2)
- || TYPE_ENDIANITY_NOT_DEFAULT (type1) != TYPE_ENDIANITY_NOT_DEFAULT (type2)
- || TYPE_VARARGS (type1) != TYPE_VARARGS (type2)
- || TYPE_VECTOR (type1) != TYPE_VECTOR (type2)
+ || type1->is_unsigned () != type2->is_unsigned ()
+ || type1->has_no_signedness () != type2->has_no_signedness ()
+ || type1->endianity_is_not_default () != type2->endianity_is_not_default ()
+ || type1->has_varargs () != type2->has_varargs ()
+ || type1->is_vector () != type2->is_vector ()
|| TYPE_NOTTEXT (type1) != TYPE_NOTTEXT (type2)
- || TYPE_INSTANCE_FLAGS (type1) != TYPE_INSTANCE_FLAGS (type2)
+ || type1->instance_flags () != type2->instance_flags ()
|| type1->num_fields () != type2->num_fields ())
return false;
{
while (!worklist->empty ())
{
- int added;
+ bool added;
struct type_equality_entry entry = std::move (worklist->back ());
worklist->pop_back ();
if (type1 == type2)
return true;
- gdb::bcache cache (nullptr, nullptr);
+ gdb::bcache cache;
worklist.emplace_back (type1, type2);
return check_types_worklist (&worklist, &cache);
}
{
struct dynamic_prop *prop = TYPE_ALLOCATED_PROP (type);
- return (prop && TYPE_DYN_PROP_KIND (prop) == PROP_CONST
- && !TYPE_DYN_PROP_ADDR (prop));
+ return (prop != nullptr && prop->kind () == PROP_CONST
+ && prop->const_val () == 0);
}
/* Associated status of type TYPE. Return zero if type TYPE is associated.
{
struct dynamic_prop *prop = TYPE_ASSOCIATED_PROP (type);
- return (prop && TYPE_DYN_PROP_KIND (prop) == PROP_CONST
- && !TYPE_DYN_PROP_ADDR (prop));
+ return (prop != nullptr && prop->kind () == PROP_CONST
+ && prop->const_val () == 0);
}
/* rank_one_type helper for when PARM's type code is TYPE_CODE_PTR. */
{
/* Deal with signed, unsigned, and plain chars and
signed and unsigned ints. */
- if (TYPE_NOSIGN (parm))
+ if (parm->has_no_signedness ())
{
/* This case only for character types. */
- if (TYPE_NOSIGN (arg))
+ if (arg->has_no_signedness ())
return EXACT_MATCH_BADNESS; /* plain char -> plain char */
else /* signed/unsigned char -> plain char */
return INTEGER_CONVERSION_BADNESS;
}
- else if (TYPE_UNSIGNED (parm))
+ else if (parm->is_unsigned ())
{
- if (TYPE_UNSIGNED (arg))
+ if (arg->is_unsigned ())
{
/* unsigned int -> unsigned int, or
unsigned long -> unsigned long */
return INTEGER_CONVERSION_BADNESS;
}
}
- else if (!TYPE_NOSIGN (arg) && !TYPE_UNSIGNED (arg))
+ else if (!arg->has_no_signedness () && !arg->is_unsigned ())
{
if (integer_types_same_name_p (parm->name (),
arg->name ()))
case TYPE_CODE_CHAR:
/* Deal with signed, unsigned, and plain chars for C++ and
with int cases falling through from previous case. */
- if (TYPE_NOSIGN (parm))
+ if (parm->has_no_signedness ())
{
- if (TYPE_NOSIGN (arg))
+ if (arg->has_no_signedness ())
return EXACT_MATCH_BADNESS;
else
return INTEGER_CONVERSION_BADNESS;
}
- else if (TYPE_UNSIGNED (parm))
+ else if (parm->is_unsigned ())
{
- if (TYPE_UNSIGNED (arg))
+ if (arg->is_unsigned ())
return EXACT_MATCH_BADNESS;
else
return INTEGER_PROMOTION_BADNESS;
}
- else if (!TYPE_NOSIGN (arg) && !TYPE_UNSIGNED (arg))
+ else if (!arg->has_no_signedness () && !arg->is_unsigned ())
return EXACT_MATCH_BADNESS;
else
return INTEGER_CONVERSION_BADNESS;
if (TYPE_IS_REFERENCE (arg))
return (sum_ranks (rank_one_type (parm, TYPE_TARGET_TYPE (arg), NULL),
- REFERENCE_SEE_THROUGH_BADNESS));
+ REFERENCE_SEE_THROUGH_BADNESS));
if (TYPE_IS_REFERENCE (parm))
return (sum_ranks (rank_one_type (TYPE_TARGET_TYPE (parm), arg, NULL),
- REFERENCE_SEE_THROUGH_BADNESS));
+ REFERENCE_SEE_THROUGH_BADNESS));
if (overload_debug)
/* Debugging only. */
fprintf_filtered (gdb_stderr,
}
}
+/* Print the contents of the TYPE's type_specific union, assuming that
+   its type-specific kind is TYPE_SPECIFIC_FIXED_POINT.
+
+   SPACES is the base indentation column; the scaling-factor line is
+   printed two columns further in.  */
+
+static void
+print_fixed_point_type_info (struct type *type, int spaces)
+{
+  printfi_filtered (spaces + 2, "scaling factor: %s\n",
+		    fixed_point_scaling_factor (type).str ().get ());
+}
+
static struct obstack dont_print_type_obstack;
+/* Print the dynamic_prop PROP. */
+
+static void
+dump_dynamic_prop (dynamic_prop const& prop)
+{
+ switch (prop.kind ())
+ {
+ case PROP_CONST:
+ printf_filtered ("%s", plongest (prop.const_val ()));
+ break;
+ case PROP_UNDEFINED:
+ printf_filtered ("(undefined)");
+ break;
+ case PROP_LOCEXPR:
+ case PROP_LOCLIST:
+ printf_filtered ("(dynamic)");
+ break;
+ default:
+ gdb_assert_not_reached ("unhandled prop kind");
+ break;
+ }
+}
+
void
recursive_dump_type (struct type *type, int spaces)
{
case TYPE_CODE_NAMESPACE:
printf_filtered ("(TYPE_CODE_NAMESPACE)");
break;
+ case TYPE_CODE_FIXED_POINT:
+ printf_filtered ("(TYPE_CODE_FIXED_POINT)");
+ break;
default:
printf_filtered ("(UNKNOWN TYPE CODE)");
break;
gdb_print_host_address (TYPE_CHAIN (type), gdb_stdout);
printf_filtered ("\n");
printfi_filtered (spaces, "instance_flags 0x%x",
- TYPE_INSTANCE_FLAGS (type));
+ (unsigned) type->instance_flags ());
if (TYPE_CONST (type))
{
puts_filtered (" TYPE_CONST");
puts_filtered ("\n");
printfi_filtered (spaces, "flags");
- if (TYPE_UNSIGNED (type))
+ if (type->is_unsigned ())
{
puts_filtered (" TYPE_UNSIGNED");
}
- if (TYPE_NOSIGN (type))
+ if (type->has_no_signedness ())
{
puts_filtered (" TYPE_NOSIGN");
}
- if (TYPE_ENDIANITY_NOT_DEFAULT (type))
+ if (type->endianity_is_not_default ())
{
puts_filtered (" TYPE_ENDIANITY_NOT_DEFAULT");
}
- if (TYPE_STUB (type))
+ if (type->is_stub ())
{
puts_filtered (" TYPE_STUB");
}
- if (TYPE_TARGET_STUB (type))
+ if (type->target_is_stub ())
{
puts_filtered (" TYPE_TARGET_STUB");
}
- if (TYPE_PROTOTYPED (type))
+ if (type->is_prototyped ())
{
puts_filtered (" TYPE_PROTOTYPED");
}
- if (TYPE_VARARGS (type))
+ if (type->has_varargs ())
{
puts_filtered (" TYPE_VARARGS");
}
/* This is used for things like AltiVec registers on ppc. Gcc emits
an attribute for the array type, which tells whether or not we
have a vector, instead of a regular array. */
- if (TYPE_VECTOR (type))
+ if (type->is_vector ())
{
puts_filtered (" TYPE_VECTOR");
}
- if (TYPE_FIXED_INSTANCE (type))
+ if (type->is_fixed_instance ())
{
puts_filtered (" TYPE_FIXED_INSTANCE");
}
- if (TYPE_STUB_SUPPORTED (type))
+ if (type->stub_is_supported ())
{
puts_filtered (" TYPE_STUB_SUPPORTED");
}
}
if (type->code () == TYPE_CODE_RANGE)
{
- printfi_filtered (spaces, "low %s%s high %s%s\n",
- plongest (TYPE_LOW_BOUND (type)),
- TYPE_LOW_BOUND_UNDEFINED (type) ? " (undefined)" : "",
- plongest (TYPE_HIGH_BOUND (type)),
- TYPE_HIGH_BOUND_UNDEFINED (type)
- ? " (undefined)" : "");
+ printfi_filtered (spaces, "low ");
+ dump_dynamic_prop (type->bounds ()->low);
+ printf_filtered (" high ");
+ dump_dynamic_prop (type->bounds ()->high);
+ printf_filtered ("\n");
}
switch (TYPE_SPECIFIC_FIELD (type))
case TYPE_SPECIFIC_FUNC:
printfi_filtered (spaces, "calling_convention %d\n",
- TYPE_CALLING_CONVENTION (type));
+ TYPE_CALLING_CONVENTION (type));
/* tail_call_list is not printed. */
break;
gdb_print_host_address (TYPE_SELF_TYPE (type), gdb_stdout);
puts_filtered ("\n");
break;
+
+ case TYPE_SPECIFIC_FIXED_POINT:
+ printfi_filtered (spaces, "fixed_point_info ");
+ print_fixed_point_type_info (type, spaces);
+ puts_filtered ("\n");
+ break;
+
+ case TYPE_SPECIFIC_INT:
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_size = type->bit_size ();
+ unsigned bit_off = type->bit_offset ();
+ printfi_filtered (spaces, " bit size = %u, bit offset = %u\n",
+ bit_size, bit_off);
+ }
+ break;
}
if (spaces == 0)
types without duplicates. We use OBJFILE's obstack, because
OBJFILE is about to be deleted. */
-htab_t
+htab_up
create_copied_types_hash (struct objfile *objfile)
{
- return htab_create_alloc_ex (1, type_pair_hash, type_pair_eq,
- NULL, &objfile->objfile_obstack,
- hashtab_obstack_allocate,
- dummy_obstack_deallocate);
+ return htab_up (htab_create_alloc_ex (1, type_pair_hash, type_pair_eq,
+ NULL, &objfile->objfile_obstack,
+ hashtab_obstack_allocate,
+ dummy_obstack_deallocate));
}
/* Recursively copy (deep copy) a dynamic attribute list of a type. */
if (type->name ())
new_type->set_name (xstrdup (type->name ()));
- TYPE_INSTANCE_FLAGS (new_type) = TYPE_INSTANCE_FLAGS (type);
+ new_type->set_instance_flags (type->instance_flags ());
TYPE_LENGTH (new_type) = TYPE_LENGTH (type);
/* Copy the fields. */
if (type->code () == TYPE_CODE_RANGE)
{
range_bounds *bounds
- = ((struct range_bounds *) TYPE_ALLOC
+ = ((struct range_bounds *) TYPE_ALLOC
(new_type, sizeof (struct range_bounds)));
*bounds = *type->bounds ();
copy_type_recursive (objfile, TYPE_SELF_TYPE (type),
copied_types));
break;
+ case TYPE_SPECIFIC_FIXED_POINT:
+ INIT_FIXED_POINT_SPECIFIC (new_type);
+ TYPE_FIXED_POINT_INFO (new_type)->scaling_factor
+ = TYPE_FIXED_POINT_INFO (type)->scaling_factor;
+ break;
+ case TYPE_SPECIFIC_INT:
+ TYPE_SPECIFIC_FIELD (new_type) = TYPE_SPECIFIC_INT;
+ TYPE_MAIN_TYPE (new_type)->type_specific.int_stuff
+ = TYPE_MAIN_TYPE (type)->type_specific.int_stuff;
+ break;
+
default:
gdb_assert_not_reached ("bad type_specific_kind");
}
gdb_assert (TYPE_OBJFILE_OWNED (type));
new_type = alloc_type_copy (type);
- TYPE_INSTANCE_FLAGS (new_type) = TYPE_INSTANCE_FLAGS (type);
+ new_type->set_instance_flags (type->instance_flags ());
TYPE_LENGTH (new_type) = TYPE_LENGTH (type);
memcpy (TYPE_MAIN_TYPE (new_type), TYPE_MAIN_TYPE (type),
sizeof (struct main_type));
t = arch_type (gdbarch, TYPE_CODE_INT, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
return t;
}
t = arch_type (gdbarch, TYPE_CODE_CHAR, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
return t;
}
t = arch_type (gdbarch, TYPE_CODE_BOOL, bit, name);
if (unsigned_p)
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
return t;
}
t = arch_type (gdbarch, TYPE_CODE_PTR, bit, name);
TYPE_TARGET_TYPE (t) = target_type;
- TYPE_UNSIGNED (t) = 1;
+ t->set_is_unsigned (true);
return t;
}
struct type *type;
type = arch_type (gdbarch, TYPE_CODE_FLAGS, bit, name);
- TYPE_UNSIGNED (type) = 1;
+ type->set_is_unsigned (true);
type->set_num_fields (0);
/* Pre-allocate enough space assuming every field is one bit. */
type->set_fields
append_composite_type_field_aligned (t, name, field, 0);
}
+\f
+
+/* We manage the lifetimes of fixed_point_type_info objects by
+ attaching them to the objfile. Currently, these objects are
+ modified during construction, and GMP does not provide a way to
+ hash the contents of an mpq_t; so it's a bit of a pain to hash-cons
+ them. If we did do this, they could be moved to the per-BFD and
+ shared across objfiles. */
+typedef std::vector<std::unique_ptr<fixed_point_type_info>>
+ fixed_point_type_storage;
+
+/* Key used for managing the storage of fixed-point type info. */
+static const struct objfile_key<fixed_point_type_storage>
+ fixed_point_objfile_key;
+
+/* See gdbtypes.h. */
+
+fixed_point_type_info *
+allocate_fixed_point_type_info (struct type *type)
+{
+ std::unique_ptr<fixed_point_type_info> up (new fixed_point_type_info);
+ fixed_point_type_info *result;
+
+ if (TYPE_OBJFILE_OWNED (type))
+ {
+ fixed_point_type_storage *storage
+ = fixed_point_objfile_key.get (TYPE_OBJFILE (type));
+ if (storage == nullptr)
+ storage = fixed_point_objfile_key.emplace (TYPE_OBJFILE (type));
+ result = up.get ();
+ storage->push_back (std::move (up));
+ }
+ else
+ {
+ /* We just leak the memory, because that's what we do generally
+ for non-objfile-attached types. */
+ result = up.release ();
+ }
+
+ return result;
+}
+
+/* See gdbtypes.h. */
+
+bool
+is_fixed_point_type (struct type *type)
+{
+ while (check_typedef (type)->code () == TYPE_CODE_RANGE)
+ type = TYPE_TARGET_TYPE (check_typedef (type));
+ type = check_typedef (type);
+
+ return type->code () == TYPE_CODE_FIXED_POINT;
+}
+
+/* See gdbtypes.h. */
+
+struct type *
+fixed_point_type_base_type (struct type *type)
+{
+ while (check_typedef (type)->code () == TYPE_CODE_RANGE)
+ type = TYPE_TARGET_TYPE (check_typedef (type));
+ type = check_typedef (type);
+
+ gdb_assert (type->code () == TYPE_CODE_FIXED_POINT);
+ return type;
+}
+
+/* See gdbtypes.h. */
+
+const gdb_mpq &
+fixed_point_scaling_factor (struct type *type)
+{
+ type = fixed_point_type_base_type (type);
+
+ return TYPE_FIXED_POINT_INFO (type)->scaling_factor;
+}
+
+\f
+
static struct gdbarch_data *gdbtypes_data;
const struct builtin_type *
builtin_type->builtin_char
= arch_integer_type (gdbarch, TARGET_CHAR_BIT,
!gdbarch_char_signed (gdbarch), "char");
- TYPE_NOSIGN (builtin_type->builtin_char) = 1;
+ builtin_type->builtin_char->set_has_no_signedness (true);
builtin_type->builtin_signed_char
= arch_integer_type (gdbarch, TARGET_CHAR_BIT,
0, "signed char");
builtin_type->builtin_float
= arch_float_type (gdbarch, gdbarch_float_bit (gdbarch),
"float", gdbarch_float_format (gdbarch));
+ builtin_type->builtin_bfloat16
+ = arch_float_type (gdbarch, gdbarch_bfloat16_bit (gdbarch),
+ "bfloat16", gdbarch_bfloat16_format (gdbarch));
builtin_type->builtin_double
= arch_float_type (gdbarch, gdbarch_double_bit (gdbarch),
"double", gdbarch_double_format (gdbarch));
= arch_integer_type (gdbarch, 128, 0, "int128_t");
builtin_type->builtin_uint128
= arch_integer_type (gdbarch, 128, 1, "uint128_t");
- TYPE_INSTANCE_FLAGS (builtin_type->builtin_int8) |=
- TYPE_INSTANCE_FLAG_NOTTEXT;
- TYPE_INSTANCE_FLAGS (builtin_type->builtin_uint8) |=
- TYPE_INSTANCE_FLAG_NOTTEXT;
+
+ builtin_type->builtin_int8->set_instance_flags
+ (builtin_type->builtin_int8->instance_flags ()
+ | TYPE_INSTANCE_FLAG_NOTTEXT);
+
+ builtin_type->builtin_uint8->set_instance_flags
+ (builtin_type->builtin_uint8->instance_flags ()
+ | TYPE_INSTANCE_FLAG_NOTTEXT);
/* Wide character types. */
builtin_type->builtin_char16
objfile_type->builtin_char
= init_integer_type (objfile, TARGET_CHAR_BIT,
!gdbarch_char_signed (gdbarch), "char");
- TYPE_NOSIGN (objfile_type->builtin_char) = 1;
+ objfile_type->builtin_char->set_has_no_signedness (true);
objfile_type->builtin_signed_char
= init_integer_type (objfile, TARGET_CHAR_BIT,
0, "signed char");
objfile_type->nodebug_text_symbol
= init_type (objfile, TYPE_CODE_FUNC, TARGET_CHAR_BIT,
"<text variable, no debug info>");
+
objfile_type->nodebug_text_gnu_ifunc_symbol
= init_type (objfile, TYPE_CODE_FUNC, TARGET_CHAR_BIT,
"<text gnu-indirect-function variable, no debug info>");
- TYPE_GNU_IFUNC (objfile_type->nodebug_text_gnu_ifunc_symbol) = 1;
+ objfile_type->nodebug_text_gnu_ifunc_symbol->set_is_gnu_ifunc (true);
+
objfile_type->nodebug_got_plt_symbol
= init_pointer_type (objfile, gdbarch_addr_bit (gdbarch),
"<text from jump slot in .got.plt, no debug info>",