| 1 | /* Low level packing and unpacking of values for GDB, the GNU Debugger. |
| 2 | |
| 3 | Copyright (C) 1986-2021 Free Software Foundation, Inc. |
| 4 | |
| 5 | This file is part of GDB. |
| 6 | |
| 7 | This program is free software; you can redistribute it and/or modify |
| 8 | it under the terms of the GNU General Public License as published by |
| 9 | the Free Software Foundation; either version 3 of the License, or |
| 10 | (at your option) any later version. |
| 11 | |
| 12 | This program is distributed in the hope that it will be useful, |
| 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | GNU General Public License for more details. |
| 16 | |
| 17 | You should have received a copy of the GNU General Public License |
| 18 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
| 19 | |
| 20 | #include "defs.h" |
| 21 | #include "arch-utils.h" |
| 22 | #include "symtab.h" |
| 23 | #include "gdbtypes.h" |
| 24 | #include "value.h" |
| 25 | #include "gdbcore.h" |
| 26 | #include "command.h" |
| 27 | #include "gdbcmd.h" |
| 28 | #include "target.h" |
| 29 | #include "language.h" |
| 30 | #include "demangle.h" |
| 31 | #include "regcache.h" |
| 32 | #include "block.h" |
| 33 | #include "target-float.h" |
| 34 | #include "objfiles.h" |
| 35 | #include "valprint.h" |
| 36 | #include "cli/cli-decode.h" |
| 37 | #include "extension.h" |
| 38 | #include <ctype.h> |
| 39 | #include "tracepoint.h" |
| 40 | #include "cp-abi.h" |
| 41 | #include "user-regs.h" |
| 42 | #include <algorithm> |
| 43 | #include "completer.h" |
| 44 | #include "gdbsupport/selftest.h" |
| 45 | #include "gdbsupport/array-view.h" |
| 46 | #include "cli/cli-style.h" |
| 47 | #include "expop.h" |
| 48 | #include "inferior.h" |
| 49 | |
| 50 | /* Definition of a user function. */ |
/* Definition of a user function.  An internal function is a
   GDB-provided callable invokable from expressions via a convenience
   variable (e.g. "$_streq").  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Called when the function is invoked from an
     expression; performs the actual work.  */
  internal_function_fn handler;

  /* User data for the handler.  Opaque to this file; passed back to
     HANDLER on each call.  */
  void *cookie;
};
| 64 | |
/* Defines an [OFFSET, OFFSET + LENGTH) range.  Used to track which
   bits of a value's contents are unavailable or optimized out.  */

struct range
{
  /* Lowest offset in the range.  */
  LONGEST offset;

  /* Length of the range.  */
  LONGEST length;

  /* Returns true if THIS is strictly less than OTHER, useful for
     searching.  We keep ranges sorted by offset and coalesce
     overlapping and contiguous ranges, so this just compares the
     starting offset.  */

  bool operator< (const range &other) const
  {
    return offset < other.offset;
  }

  /* Returns true if THIS is equal to OTHER.  Both offset and length
     must match.  */
  bool operator== (const range &other) const
  {
    return offset == other.offset && length == other.length;
  }
};
| 91 | |
| 92 | /* Returns true if the ranges defined by [offset1, offset1+len1) and |
| 93 | [offset2, offset2+len2) overlap. */ |
| 94 | |
| 95 | static int |
| 96 | ranges_overlap (LONGEST offset1, LONGEST len1, |
| 97 | LONGEST offset2, LONGEST len2) |
| 98 | { |
| 99 | ULONGEST h, l; |
| 100 | |
| 101 | l = std::max (offset1, offset2); |
| 102 | h = std::min (offset1 + len1, offset2 + len2); |
| 103 | return (l < h); |
| 104 | } |
| 105 | |
| 106 | /* Returns true if RANGES contains any range that overlaps [OFFSET, |
| 107 | OFFSET+LENGTH). */ |
| 108 | |
| 109 | static int |
| 110 | ranges_contain (const std::vector<range> &ranges, LONGEST offset, |
| 111 | LONGEST length) |
| 112 | { |
| 113 | range what; |
| 114 | |
| 115 | what.offset = offset; |
| 116 | what.length = length; |
| 117 | |
| 118 | /* We keep ranges sorted by offset and coalesce overlapping and |
| 119 | contiguous ranges, so to check if a range list contains a given |
| 120 | range, we can do a binary search for the position the given range |
| 121 | would be inserted if we only considered the starting OFFSET of |
| 122 | ranges. We call that position I. Since we also have LENGTH to |
| 123 | care for (this is a range afterall), we need to check if the |
| 124 | _previous_ range overlaps the I range. E.g., |
| 125 | |
| 126 | R |
| 127 | |---| |
| 128 | |---| |---| |------| ... |--| |
| 129 | 0 1 2 N |
| 130 | |
| 131 | I=1 |
| 132 | |
| 133 | In the case above, the binary search would return `I=1', meaning, |
| 134 | this OFFSET should be inserted at position 1, and the current |
| 135 | position 1 should be pushed further (and before 2). But, `0' |
| 136 | overlaps with R. |
| 137 | |
| 138 | Then we need to check if the I range overlaps the I range itself. |
| 139 | E.g., |
| 140 | |
| 141 | R |
| 142 | |---| |
| 143 | |---| |---| |-------| ... |--| |
| 144 | 0 1 2 N |
| 145 | |
| 146 | I=1 |
| 147 | */ |
| 148 | |
| 149 | |
| 150 | auto i = std::lower_bound (ranges.begin (), ranges.end (), what); |
| 151 | |
| 152 | if (i > ranges.begin ()) |
| 153 | { |
| 154 | const struct range &bef = *(i - 1); |
| 155 | |
| 156 | if (ranges_overlap (bef.offset, bef.length, offset, length)) |
| 157 | return 1; |
| 158 | } |
| 159 | |
| 160 | if (i < ranges.end ()) |
| 161 | { |
| 162 | const struct range &r = *i; |
| 163 | |
| 164 | if (ranges_overlap (r.offset, r.length, offset, length)) |
| 165 | return 1; |
| 166 | } |
| 167 | |
| 168 | return 0; |
| 169 | } |
| 170 | |
/* Command list used for internal convenience ("$_...") functions.
   NOTE(review): presumably populated by command registration later in
   this file -- confirm against the _initialize routine.  */
static struct cmd_list_element *functionlist;
| 172 | |
| 173 | /* Note that the fields in this structure are arranged to save a bit |
| 174 | of memory. */ |
| 175 | |
struct value
{
  /* Construct a value of type TYPE_.  New values start out lazy (no
     contents fetched yet), modifiable, marked initialized, and with
     the enclosing type equal to the declared type.  */
  explicit value (struct type *type_)
    : modifiable (1),
      lazy (1),
      initialized (1),
      stack (0),
      type (type_),
      enclosing_type (type_)
  {
  }

  /* Destroy the value, giving computed lvalues a chance to free
     their closure, and deleting the xmethod worker for xcallable
     lvalues.  */
  ~value ()
  {
    if (VALUE_LVAL (this) == lval_computed)
      {
	const struct lval_funcs *funcs = location.computed.funcs;

	if (funcs->free_closure)
	  funcs->free_closure (this);
      }
    else if (VALUE_LVAL (this) == lval_xcallable)
      delete location.xm_worker;
  }

  DISABLE_COPY_AND_ASSIGN (value);

  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval = not_lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* Location of value (if lval).  Which union member is active is
     determined by the LVAL field above.  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
	 If the register value is found relative to frame F, then the
	 frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  Owned by this value; deleted in the
       destructor.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location {};

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset = 0;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize = 0;

  /* Only used for bitfields; position of start of field.  For
     little-endian targets, it is the position of the LSB.  For
     big-endian targets, it is the position of the MSB.  */
  LONGEST bitpos = 0;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count = 1;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  value_ref_ptr parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset = 0;
  LONGEST pointed_to_offset = 0;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb::unique_xmalloc_ptr<gdb_byte> contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  std::vector<range> unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  std::vector<range> optimized_out;
};
| 362 | |
| 363 | /* See value.h. */ |
| 364 | |
| 365 | struct gdbarch * |
| 366 | get_value_arch (const struct value *value) |
| 367 | { |
| 368 | return value_type (value)->arch (); |
| 369 | } |
| 370 | |
| 371 | int |
| 372 | value_bits_available (const struct value *value, LONGEST offset, LONGEST length) |
| 373 | { |
| 374 | gdb_assert (!value->lazy); |
| 375 | |
| 376 | return !ranges_contain (value->unavailable, offset, length); |
| 377 | } |
| 378 | |
| 379 | int |
| 380 | value_bytes_available (const struct value *value, |
| 381 | LONGEST offset, LONGEST length) |
| 382 | { |
| 383 | return value_bits_available (value, |
| 384 | offset * TARGET_CHAR_BIT, |
| 385 | length * TARGET_CHAR_BIT); |
| 386 | } |
| 387 | |
| 388 | int |
| 389 | value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length) |
| 390 | { |
| 391 | gdb_assert (!value->lazy); |
| 392 | |
| 393 | return ranges_contain (value->optimized_out, bit_offset, bit_length); |
| 394 | } |
| 395 | |
| 396 | int |
| 397 | value_entirely_available (struct value *value) |
| 398 | { |
| 399 | /* We can only tell whether the whole value is available when we try |
| 400 | to read it. */ |
| 401 | if (value->lazy) |
| 402 | value_fetch_lazy (value); |
| 403 | |
| 404 | if (value->unavailable.empty ()) |
| 405 | return 1; |
| 406 | return 0; |
| 407 | } |
| 408 | |
| 409 | /* Returns true if VALUE is entirely covered by RANGES. If the value |
| 410 | is lazy, it'll be read now. Note that RANGE is a pointer to |
| 411 | pointer because reading the value might change *RANGE. */ |
| 412 | |
| 413 | static int |
| 414 | value_entirely_covered_by_range_vector (struct value *value, |
| 415 | const std::vector<range> &ranges) |
| 416 | { |
| 417 | /* We can only tell whether the whole value is optimized out / |
| 418 | unavailable when we try to read it. */ |
| 419 | if (value->lazy) |
| 420 | value_fetch_lazy (value); |
| 421 | |
| 422 | if (ranges.size () == 1) |
| 423 | { |
| 424 | const struct range &t = ranges[0]; |
| 425 | |
| 426 | if (t.offset == 0 |
| 427 | && t.length == (TARGET_CHAR_BIT |
| 428 | * TYPE_LENGTH (value_enclosing_type (value)))) |
| 429 | return 1; |
| 430 | } |
| 431 | |
| 432 | return 0; |
| 433 | } |
| 434 | |
| 435 | int |
| 436 | value_entirely_unavailable (struct value *value) |
| 437 | { |
| 438 | return value_entirely_covered_by_range_vector (value, value->unavailable); |
| 439 | } |
| 440 | |
| 441 | int |
| 442 | value_entirely_optimized_out (struct value *value) |
| 443 | { |
| 444 | return value_entirely_covered_by_range_vector (value, value->optimized_out); |
| 445 | } |
| 446 | |
| 447 | /* Insert into the vector pointed to by VECTORP the bit range starting of |
| 448 | OFFSET bits, and extending for the next LENGTH bits. */ |
| 449 | |
| 450 | static void |
| 451 | insert_into_bit_range_vector (std::vector<range> *vectorp, |
| 452 | LONGEST offset, LONGEST length) |
| 453 | { |
| 454 | range newr; |
| 455 | |
| 456 | /* Insert the range sorted. If there's overlap or the new range |
| 457 | would be contiguous with an existing range, merge. */ |
| 458 | |
| 459 | newr.offset = offset; |
| 460 | newr.length = length; |
| 461 | |
| 462 | /* Do a binary search for the position the given range would be |
| 463 | inserted if we only considered the starting OFFSET of ranges. |
| 464 | Call that position I. Since we also have LENGTH to care for |
| 465 | (this is a range afterall), we need to check if the _previous_ |
| 466 | range overlaps the I range. E.g., calling R the new range: |
| 467 | |
| 468 | #1 - overlaps with previous |
| 469 | |
| 470 | R |
| 471 | |-...-| |
| 472 | |---| |---| |------| ... |--| |
| 473 | 0 1 2 N |
| 474 | |
| 475 | I=1 |
| 476 | |
| 477 | In the case #1 above, the binary search would return `I=1', |
| 478 | meaning, this OFFSET should be inserted at position 1, and the |
| 479 | current position 1 should be pushed further (and become 2). But, |
| 480 | note that `0' overlaps with R, so we want to merge them. |
| 481 | |
| 482 | A similar consideration needs to be taken if the new range would |
| 483 | be contiguous with the previous range: |
| 484 | |
| 485 | #2 - contiguous with previous |
| 486 | |
| 487 | R |
| 488 | |-...-| |
| 489 | |--| |---| |------| ... |--| |
| 490 | 0 1 2 N |
| 491 | |
| 492 | I=1 |
| 493 | |
| 494 | If there's no overlap with the previous range, as in: |
| 495 | |
| 496 | #3 - not overlapping and not contiguous |
| 497 | |
| 498 | R |
| 499 | |-...-| |
| 500 | |--| |---| |------| ... |--| |
| 501 | 0 1 2 N |
| 502 | |
| 503 | I=1 |
| 504 | |
| 505 | or if I is 0: |
| 506 | |
| 507 | #4 - R is the range with lowest offset |
| 508 | |
| 509 | R |
| 510 | |-...-| |
| 511 | |--| |---| |------| ... |--| |
| 512 | 0 1 2 N |
| 513 | |
| 514 | I=0 |
| 515 | |
| 516 | ... we just push the new range to I. |
| 517 | |
| 518 | All the 4 cases above need to consider that the new range may |
| 519 | also overlap several of the ranges that follow, or that R may be |
| 520 | contiguous with the following range, and merge. E.g., |
| 521 | |
| 522 | #5 - overlapping following ranges |
| 523 | |
| 524 | R |
| 525 | |------------------------| |
| 526 | |--| |---| |------| ... |--| |
| 527 | 0 1 2 N |
| 528 | |
| 529 | I=0 |
| 530 | |
| 531 | or: |
| 532 | |
| 533 | R |
| 534 | |-------| |
| 535 | |--| |---| |------| ... |--| |
| 536 | 0 1 2 N |
| 537 | |
| 538 | I=1 |
| 539 | |
| 540 | */ |
| 541 | |
| 542 | auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr); |
| 543 | if (i > vectorp->begin ()) |
| 544 | { |
| 545 | struct range &bef = *(i - 1); |
| 546 | |
| 547 | if (ranges_overlap (bef.offset, bef.length, offset, length)) |
| 548 | { |
| 549 | /* #1 */ |
| 550 | ULONGEST l = std::min (bef.offset, offset); |
| 551 | ULONGEST h = std::max (bef.offset + bef.length, offset + length); |
| 552 | |
| 553 | bef.offset = l; |
| 554 | bef.length = h - l; |
| 555 | i--; |
| 556 | } |
| 557 | else if (offset == bef.offset + bef.length) |
| 558 | { |
| 559 | /* #2 */ |
| 560 | bef.length += length; |
| 561 | i--; |
| 562 | } |
| 563 | else |
| 564 | { |
| 565 | /* #3 */ |
| 566 | i = vectorp->insert (i, newr); |
| 567 | } |
| 568 | } |
| 569 | else |
| 570 | { |
| 571 | /* #4 */ |
| 572 | i = vectorp->insert (i, newr); |
| 573 | } |
| 574 | |
| 575 | /* Check whether the ranges following the one we've just added or |
| 576 | touched can be folded in (#5 above). */ |
| 577 | if (i != vectorp->end () && i + 1 < vectorp->end ()) |
| 578 | { |
| 579 | int removed = 0; |
| 580 | auto next = i + 1; |
| 581 | |
| 582 | /* Get the range we just touched. */ |
| 583 | struct range &t = *i; |
| 584 | removed = 0; |
| 585 | |
| 586 | i = next; |
| 587 | for (; i < vectorp->end (); i++) |
| 588 | { |
| 589 | struct range &r = *i; |
| 590 | if (r.offset <= t.offset + t.length) |
| 591 | { |
| 592 | ULONGEST l, h; |
| 593 | |
| 594 | l = std::min (t.offset, r.offset); |
| 595 | h = std::max (t.offset + t.length, r.offset + r.length); |
| 596 | |
| 597 | t.offset = l; |
| 598 | t.length = h - l; |
| 599 | |
| 600 | removed++; |
| 601 | } |
| 602 | else |
| 603 | { |
| 604 | /* If we couldn't merge this one, we won't be able to |
| 605 | merge following ones either, since the ranges are |
| 606 | always sorted by OFFSET. */ |
| 607 | break; |
| 608 | } |
| 609 | } |
| 610 | |
| 611 | if (removed != 0) |
| 612 | vectorp->erase (next, next + removed); |
| 613 | } |
| 614 | } |
| 615 | |
| 616 | void |
| 617 | mark_value_bits_unavailable (struct value *value, |
| 618 | LONGEST offset, LONGEST length) |
| 619 | { |
| 620 | insert_into_bit_range_vector (&value->unavailable, offset, length); |
| 621 | } |
| 622 | |
| 623 | void |
| 624 | mark_value_bytes_unavailable (struct value *value, |
| 625 | LONGEST offset, LONGEST length) |
| 626 | { |
| 627 | mark_value_bits_unavailable (value, |
| 628 | offset * TARGET_CHAR_BIT, |
| 629 | length * TARGET_CHAR_BIT); |
| 630 | } |
| 631 | |
| 632 | /* Find the first range in RANGES that overlaps the range defined by |
| 633 | OFFSET and LENGTH, starting at element POS in the RANGES vector, |
| 634 | Returns the index into RANGES where such overlapping range was |
| 635 | found, or -1 if none was found. */ |
| 636 | |
| 637 | static int |
| 638 | find_first_range_overlap (const std::vector<range> *ranges, int pos, |
| 639 | LONGEST offset, LONGEST length) |
| 640 | { |
| 641 | int i; |
| 642 | |
| 643 | for (i = pos; i < ranges->size (); i++) |
| 644 | { |
| 645 | const range &r = (*ranges)[i]; |
| 646 | if (ranges_overlap (r.offset, r.length, offset, length)) |
| 647 | return i; |
| 648 | } |
| 649 | |
| 650 | return -1; |
| 651 | } |
| 652 | |
| 653 | /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at |
| 654 | PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise |
| 655 | return non-zero. |
| 656 | |
| 657 | It must always be the case that: |
| 658 | OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT |
| 659 | |
| 660 | It is assumed that memory can be accessed from: |
| 661 | PTR + (OFFSET_BITS / TARGET_CHAR_BIT) |
| 662 | to: |
| 663 | PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1) |
| 664 | / TARGET_CHAR_BIT) */ |
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both offsets must share the same sub-byte alignment; otherwise
     the bit windows don't line up within bytes.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the low-order BITS bits of each byte, i.e. the
	 bits from the offset to the next byte boundary.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this partial byte; trim
	     the mask to only LENGTH_BITS bits.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the high-order BITS bits of the final partial
	 byte.  */
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
| 753 | |
| 754 | /* Helper struct for find_first_range_overlap_and_match and |
| 755 | value_contents_bits_eq. Keep track of which slot of a given ranges |
| 756 | vector have we last looked at. */ |
| 757 | |
struct ranges_and_idx
{
  /* The ranges.  Not owned; points at a value's unavailable or
     optimized_out vector.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
| 767 | |
| 768 | /* Helper function for value_contents_bits_eq. Compare LENGTH bits of |
| 769 | RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's |
| 770 | ranges starting at OFFSET2 bits. Return true if the ranges match |
| 771 | and fill in *L and *H with the overlapping window relative to |
| 772 | (both) OFFSET1 or OFFSET2. */ |
| 773 | |
| 774 | static int |
| 775 | find_first_range_overlap_and_match (struct ranges_and_idx *rp1, |
| 776 | struct ranges_and_idx *rp2, |
| 777 | LONGEST offset1, LONGEST offset2, |
| 778 | LONGEST length, ULONGEST *l, ULONGEST *h) |
| 779 | { |
| 780 | rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx, |
| 781 | offset1, length); |
| 782 | rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx, |
| 783 | offset2, length); |
| 784 | |
| 785 | if (rp1->idx == -1 && rp2->idx == -1) |
| 786 | { |
| 787 | *l = length; |
| 788 | *h = length; |
| 789 | return 1; |
| 790 | } |
| 791 | else if (rp1->idx == -1 || rp2->idx == -1) |
| 792 | return 0; |
| 793 | else |
| 794 | { |
| 795 | const range *r1, *r2; |
| 796 | ULONGEST l1, h1; |
| 797 | ULONGEST l2, h2; |
| 798 | |
| 799 | r1 = &(*rp1->ranges)[rp1->idx]; |
| 800 | r2 = &(*rp2->ranges)[rp2->idx]; |
| 801 | |
| 802 | /* Get the unavailable windows intersected by the incoming |
| 803 | ranges. The first and last ranges that overlap the argument |
| 804 | range may be wider than said incoming arguments ranges. */ |
| 805 | l1 = std::max (offset1, r1->offset); |
| 806 | h1 = std::min (offset1 + length, r1->offset + r1->length); |
| 807 | |
| 808 | l2 = std::max (offset2, r2->offset); |
| 809 | h2 = std::min (offset2 + length, offset2 + r2->length); |
| 810 | |
| 811 | /* Make them relative to the respective start offsets, so we can |
| 812 | compare them for equality. */ |
| 813 | l1 -= offset1; |
| 814 | h1 -= offset1; |
| 815 | |
| 816 | l2 -= offset2; |
| 817 | h2 -= offset2; |
| 818 | |
| 819 | /* Different ranges, no match. */ |
| 820 | if (l1 != l2 || h1 != h2) |
| 821 | return 0; |
| 822 | |
| 823 | *h = h1; |
| 824 | *l = l1; |
| 825 | return 1; |
| 826 | } |
| 827 | } |
| 828 | |
| 829 | /* Helper function for value_contents_eq. The only difference is that |
| 830 | this function is bit rather than byte based. |
| 831 | |
| 832 | Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits |
| 833 | with LENGTH bits of VAL2's contents starting at OFFSET2 bits. |
| 834 | Return true if the available bits match. */ |
| 835 | |
static bool
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  /* Zeroing also resets each idx to 0, so lookups start at the
     beginning of each ranges vector.  */
  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->unavailable;
  rp2[0].ranges = &val2->unavailable;
  rp1[1].ranges = &val1->optimized_out;
  rp2[1].ranges = &val2->optimized_out;

  /* Walk the window in chunks: each iteration validates one stretch
     of contents up to the next invalid/unavailable boundary.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
				   val2->contents.get (), offset2, l) != 0)
	return false;

      /* Advance past the chunk just validated (the matched invalid
	 window ends at H relative to the current offsets).  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
| 897 | |
| 898 | bool |
| 899 | value_contents_eq (const struct value *val1, LONGEST offset1, |
| 900 | const struct value *val2, LONGEST offset2, |
| 901 | LONGEST length) |
| 902 | { |
| 903 | return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT, |
| 904 | val2, offset2 * TARGET_CHAR_BIT, |
| 905 | length * TARGET_CHAR_BIT); |
| 906 | } |
| 907 | |
| 908 | |
/* The value-history records all the values printed by print commands
   during this session.  Each entry is an owning reference, so a value
   placed on the history stays live for the rest of the session.  */

static std::vector<value_ref_ptr> value_history;
| 913 | |
| 914 | \f |
/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  The references
   held here are owning ones; clearing or truncating this vector is
   what deallocates unreleased values.  */

static std::vector<value_ref_ptr> all_values;
| 920 | |
| 921 | /* Allocate a lazy value for type TYPE. Its actual content is |
| 922 | "lazily" allocated too: the content field of the return value is |
| 923 | NULL; it will be allocated when it is fetched from the target. */ |
| 924 | |
| 925 | struct value * |
| 926 | allocate_value_lazy (struct type *type) |
| 927 | { |
| 928 | struct value *val; |
| 929 | |
| 930 | /* Call check_typedef on our type to make sure that, if TYPE |
| 931 | is a TYPE_CODE_TYPEDEF, its length is set to the length |
| 932 | of the target type instead of zero. However, we do not |
| 933 | replace the typedef type by the target type, because we want |
| 934 | to keep the typedef in order to be able to set the VAL's type |
| 935 | description correctly. */ |
| 936 | check_typedef (type); |
| 937 | |
| 938 | val = new struct value (type); |
| 939 | |
| 940 | /* Values start out on the all_values chain. */ |
| 941 | all_values.emplace_back (val); |
| 942 | |
| 943 | return val; |
| 944 | } |
| 945 | |
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see check_type_length_before_alloc and show_max_value_size).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
| 963 | |
| 964 | /* Implement the "set max-value-size" command. */ |
| 965 | |
| 966 | static void |
| 967 | set_max_value_size (const char *args, int from_tty, |
| 968 | struct cmd_list_element *c) |
| 969 | { |
| 970 | gdb_assert (max_value_size == -1 || max_value_size >= 0); |
| 971 | |
| 972 | if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE) |
| 973 | { |
| 974 | max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE; |
| 975 | error (_("max-value-size set too low, increasing to %d bytes"), |
| 976 | max_value_size); |
| 977 | } |
| 978 | } |
| 979 | |
| 980 | /* Implement the "show max-value-size" command. */ |
| 981 | |
| 982 | static void |
| 983 | show_max_value_size (struct ui_file *file, int from_tty, |
| 984 | struct cmd_list_element *c, const char *value) |
| 985 | { |
| 986 | if (max_value_size == -1) |
| 987 | fprintf_filtered (file, _("Maximum value size is unlimited.\n")); |
| 988 | else |
| 989 | fprintf_filtered (file, _("Maximum value size is %d bytes.\n"), |
| 990 | max_value_size); |
| 991 | } |
| 992 | |
| 993 | /* Called before we attempt to allocate or reallocate a buffer for the |
| 994 | contents of a value. TYPE is the type of the value for which we are |
| 995 | allocating the buffer. If the buffer is too large (based on the user |
| 996 | controllable setting) then throw an error. If this function returns |
| 997 | then we should attempt to allocate the buffer. */ |
| 998 | |
| 999 | static void |
| 1000 | check_type_length_before_alloc (const struct type *type) |
| 1001 | { |
| 1002 | ULONGEST length = TYPE_LENGTH (type); |
| 1003 | |
| 1004 | if (max_value_size > -1 && length > max_value_size) |
| 1005 | { |
| 1006 | if (type->name () != NULL) |
| 1007 | error (_("value of type `%s' requires %s bytes, which is more " |
| 1008 | "than max-value-size"), type->name (), pulongest (length)); |
| 1009 | else |
| 1010 | error (_("value requires %s bytes, which is more than " |
| 1011 | "max-value-size"), pulongest (length)); |
| 1012 | } |
| 1013 | } |
| 1014 | |
| 1015 | /* Allocate the contents of VAL if it has not been allocated yet. */ |
| 1016 | |
| 1017 | static void |
| 1018 | allocate_value_contents (struct value *val) |
| 1019 | { |
| 1020 | if (!val->contents) |
| 1021 | { |
| 1022 | check_type_length_before_alloc (val->enclosing_type); |
| 1023 | val->contents.reset |
| 1024 | ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type))); |
| 1025 | } |
| 1026 | } |
| 1027 | |
| 1028 | /* Allocate a value and its contents for type TYPE. */ |
| 1029 | |
| 1030 | struct value * |
| 1031 | allocate_value (struct type *type) |
| 1032 | { |
| 1033 | struct value *val = allocate_value_lazy (type); |
| 1034 | |
| 1035 | allocate_value_contents (val); |
| 1036 | val->lazy = 0; |
| 1037 | return val; |
| 1038 | } |
| 1039 | |
| 1040 | /* Allocate a value that has the correct length |
| 1041 | for COUNT repetitions of type TYPE. */ |
| 1042 | |
| 1043 | struct value * |
| 1044 | allocate_repeat_value (struct type *type, int count) |
| 1045 | { |
| 1046 | /* Despite the fact that we are really creating an array of TYPE here, we |
| 1047 | use the string lower bound as the array lower bound. This seems to |
| 1048 | work fine for now. */ |
| 1049 | int low_bound = current_language->string_lower_bound (); |
| 1050 | /* FIXME-type-allocation: need a way to free this type when we are |
| 1051 | done with it. */ |
| 1052 | struct type *array_type |
| 1053 | = lookup_array_range_type (type, low_bound, count + low_bound - 1); |
| 1054 | |
| 1055 | return allocate_value (array_type); |
| 1056 | } |
| 1057 | |
| 1058 | struct value * |
| 1059 | allocate_computed_value (struct type *type, |
| 1060 | const struct lval_funcs *funcs, |
| 1061 | void *closure) |
| 1062 | { |
| 1063 | struct value *v = allocate_value_lazy (type); |
| 1064 | |
| 1065 | VALUE_LVAL (v) = lval_computed; |
| 1066 | v->location.computed.funcs = funcs; |
| 1067 | v->location.computed.closure = closure; |
| 1068 | |
| 1069 | return v; |
| 1070 | } |
| 1071 | |
| 1072 | /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */ |
| 1073 | |
| 1074 | struct value * |
| 1075 | allocate_optimized_out_value (struct type *type) |
| 1076 | { |
| 1077 | struct value *retval = allocate_value_lazy (type); |
| 1078 | |
| 1079 | mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type)); |
| 1080 | set_value_lazy (retval, 0); |
| 1081 | return retval; |
| 1082 | } |
| 1083 | |
/* Accessor methods.  */

/* Return the declared type of VALUE.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}

/* Overwrite VALUE's type in place.  Deprecated: prefer constructing
   a new value with the desired type.  */

void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}

/* Return VALUE's offset.  For a value with a parent this is the
   offset within the parent (see value_address).  */

LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}

/* Bit position of a bitfield value -- presumably relative to its
   containing object; confirm against value.h.  */

LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}

/* Size in bits of a bitfield value.  */

LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}

/* Return VALUE's parent value (non-owning pointer), or NULL.  */

struct value *
value_parent (const struct value *value)
{
  return value->parent.get ();
}

/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  value->parent = value_ref_ptr::new_reference (parent);
}
| 1143 | |
| 1144 | gdb_byte * |
| 1145 | value_contents_raw (struct value *value) |
| 1146 | { |
| 1147 | struct gdbarch *arch = get_value_arch (value); |
| 1148 | int unit_size = gdbarch_addressable_memory_unit_size (arch); |
| 1149 | |
| 1150 | allocate_value_contents (value); |
| 1151 | return value->contents.get () + value->embedded_offset * unit_size; |
| 1152 | } |
| 1153 | |
/* Return a writeable pointer to the start of VALUE's full contents
   buffer (covering the whole enclosing type), allocating it if
   necessary.  No laziness or availability checks are performed.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents.get ();
}

/* Return VALUE's enclosing type.  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
| 1166 | |
| 1167 | /* Look at value.h for description. */ |
| 1168 | |
| 1169 | struct type * |
| 1170 | value_actual_type (struct value *value, int resolve_simple_types, |
| 1171 | int *real_type_found) |
| 1172 | { |
| 1173 | struct value_print_options opts; |
| 1174 | struct type *result; |
| 1175 | |
| 1176 | get_user_print_options (&opts); |
| 1177 | |
| 1178 | if (real_type_found) |
| 1179 | *real_type_found = 0; |
| 1180 | result = value_type (value); |
| 1181 | if (opts.objectprint) |
| 1182 | { |
| 1183 | /* If result's target type is TYPE_CODE_STRUCT, proceed to |
| 1184 | fetch its rtti type. */ |
| 1185 | if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result)) |
| 1186 | && (check_typedef (TYPE_TARGET_TYPE (result))->code () |
| 1187 | == TYPE_CODE_STRUCT) |
| 1188 | && !value_optimized_out (value)) |
| 1189 | { |
| 1190 | struct type *real_type; |
| 1191 | |
| 1192 | real_type = value_rtti_indirect_type (value, NULL, NULL, NULL); |
| 1193 | if (real_type) |
| 1194 | { |
| 1195 | if (real_type_found) |
| 1196 | *real_type_found = 1; |
| 1197 | result = real_type; |
| 1198 | } |
| 1199 | } |
| 1200 | else if (resolve_simple_types) |
| 1201 | { |
| 1202 | if (real_type_found) |
| 1203 | *real_type_found = 1; |
| 1204 | result = value_enclosing_type (value); |
| 1205 | } |
| 1206 | } |
| 1207 | |
| 1208 | return result; |
| 1209 | } |
| 1210 | |
/* Throw the generic "optimized out" error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Error out if any part of VALUE has been marked optimized out.
   Register values get a more specific message.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!value->optimized_out.empty ())
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!value->unavailable.empty ())
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
| 1235 | |
/* Return VALUE's full contents for printing purposes: the value is
   fetched if lazy, but no optimized-out/unavailable checks are made,
   so partially valid values can still be printed.  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents.get ();
}

/* Like value_contents_for_printing, but for values already known to
   be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents.get ();
}

/* Return VALUE's full contents, erroring out if any part of them is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
| 1259 | |
| 1260 | /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET, |
| 1261 | SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */ |
| 1262 | |
| 1263 | static void |
| 1264 | ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset, |
| 1265 | const std::vector<range> &src_range, int src_bit_offset, |
| 1266 | int bit_length) |
| 1267 | { |
| 1268 | for (const range &r : src_range) |
| 1269 | { |
| 1270 | ULONGEST h, l; |
| 1271 | |
| 1272 | l = std::max (r.offset, (LONGEST) src_bit_offset); |
| 1273 | h = std::min (r.offset + r.length, |
| 1274 | (LONGEST) src_bit_offset + bit_length); |
| 1275 | |
| 1276 | if (l < h) |
| 1277 | insert_into_bit_range_vector (dst_range, |
| 1278 | dst_bit_offset + (l - src_bit_offset), |
| 1279 | h - l); |
| 1280 | } |
| 1281 | } |
| 1282 | |
| 1283 | /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET, |
| 1284 | SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */ |
| 1285 | |
| 1286 | static void |
| 1287 | value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset, |
| 1288 | const struct value *src, int src_bit_offset, |
| 1289 | int bit_length) |
| 1290 | { |
| 1291 | ranges_copy_adjusted (&dst->unavailable, dst_bit_offset, |
| 1292 | src->unavailable, src_bit_offset, |
| 1293 | bit_length); |
| 1294 | ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset, |
| 1295 | src->optimized_out, src_bit_offset, |
| 1296 | bit_length); |
| 1297 | } |
| 1298 | |
| 1299 | /* Copy LENGTH target addressable memory units of SRC value's (all) contents |
| 1300 | (value_contents_all) starting at SRC_OFFSET, into DST value's (all) |
| 1301 | contents, starting at DST_OFFSET. If unavailable contents are |
| 1302 | being copied from SRC, the corresponding DST contents are marked |
| 1303 | unavailable accordingly. Neither DST nor SRC may be lazy |
| 1304 | values. |
| 1305 | |
| 1306 | It is assumed the contents of DST in the [DST_OFFSET, |
| 1307 | DST_OFFSET+LENGTH) range are wholly available. */ |
| 1308 | |
| 1309 | static void |
| 1310 | value_contents_copy_raw (struct value *dst, LONGEST dst_offset, |
| 1311 | struct value *src, LONGEST src_offset, LONGEST length) |
| 1312 | { |
| 1313 | LONGEST src_bit_offset, dst_bit_offset, bit_length; |
| 1314 | struct gdbarch *arch = get_value_arch (src); |
| 1315 | int unit_size = gdbarch_addressable_memory_unit_size (arch); |
| 1316 | |
| 1317 | /* A lazy DST would make that this copy operation useless, since as |
| 1318 | soon as DST's contents were un-lazied (by a later value_contents |
| 1319 | call, say), the contents would be overwritten. A lazy SRC would |
| 1320 | mean we'd be copying garbage. */ |
| 1321 | gdb_assert (!dst->lazy && !src->lazy); |
| 1322 | |
| 1323 | /* The overwritten DST range gets unavailability ORed in, not |
| 1324 | replaced. Make sure to remember to implement replacing if it |
| 1325 | turns out actually necessary. */ |
| 1326 | gdb_assert (value_bytes_available (dst, dst_offset, length)); |
| 1327 | gdb_assert (!value_bits_any_optimized_out (dst, |
| 1328 | TARGET_CHAR_BIT * dst_offset, |
| 1329 | TARGET_CHAR_BIT * length)); |
| 1330 | |
| 1331 | /* Copy the data. */ |
| 1332 | memcpy (value_contents_all_raw (dst) + dst_offset * unit_size, |
| 1333 | value_contents_all_raw (src) + src_offset * unit_size, |
| 1334 | length * unit_size); |
| 1335 | |
| 1336 | /* Copy the meta-data, adjusted. */ |
| 1337 | src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT; |
| 1338 | dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT; |
| 1339 | bit_length = length * unit_size * HOST_CHAR_BIT; |
| 1340 | |
| 1341 | value_ranges_copy_adjusted (dst, dst_bit_offset, |
| 1342 | src, src_bit_offset, |
| 1343 | bit_length); |
| 1344 | } |
| 1345 | |
| 1346 | /* Copy LENGTH bytes of SRC value's (all) contents |
| 1347 | (value_contents_all) starting at SRC_OFFSET byte, into DST value's |
| 1348 | (all) contents, starting at DST_OFFSET. If unavailable contents |
| 1349 | are being copied from SRC, the corresponding DST contents are |
| 1350 | marked unavailable accordingly. DST must not be lazy. If SRC is |
| 1351 | lazy, it will be fetched now. |
| 1352 | |
| 1353 | It is assumed the contents of DST in the [DST_OFFSET, |
| 1354 | DST_OFFSET+LENGTH) range are wholly available. */ |
| 1355 | |
| 1356 | void |
| 1357 | value_contents_copy (struct value *dst, LONGEST dst_offset, |
| 1358 | struct value *src, LONGEST src_offset, LONGEST length) |
| 1359 | { |
| 1360 | if (src->lazy) |
| 1361 | value_fetch_lazy (src); |
| 1362 | |
| 1363 | value_contents_copy_raw (dst, dst_offset, src, src_offset, length); |
| 1364 | } |
| 1365 | |
/* Return non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->lazy;
}

/* Set/clear VALUE's lazy flag.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Return VALUE's stack flag -- presumably set for values that live
   in stack memory; see value.h for the exact contract.  */

int
value_stack (const struct value *value)
{
  return value->stack;
}

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}

/* Return VALUE's contents (at its embedded offset), erroring out if
   any part is optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Like value_contents, but writeable and without the optimized-out /
   unavailable checks.  Fetches VALUE if lazy.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
| 1406 | |
/* Return non-zero if any part of VALUE is optimized out.  May fetch
   a lazy VALUE as a side effect, since optimized-out-ness is only
   known after a fetch has been attempted.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (value->optimized_out.empty () && value->lazy)
    {
      try
	{
	  value_fetch_lazy (value);
	}
      catch (const gdb_exception_error &ex)
	{
	  switch (ex.error)
	    {
	    case MEMORY_ERROR:
	    case OPTIMIZED_OUT_ERROR:
	    case NOT_AVAILABLE_ERROR:
	      /* These can normally happen when we try to access an
		 optimized out or unavailable register, either in a
		 physical register or spilled to memory.  These are
		 swallowed; anything else propagates to the caller.  */
	      break;
	    default:
	      throw;
	    }
	}
    }

  return !value->optimized_out.empty ();
}
| 1437 | |
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  This is a byte-granularity convenience
   wrapper around the bit-granularity version below.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
| 1457 | |
| 1458 | int |
| 1459 | value_bits_synthetic_pointer (const struct value *value, |
| 1460 | LONGEST offset, LONGEST length) |
| 1461 | { |
| 1462 | if (value->lval != lval_computed |
| 1463 | || !value->location.computed.funcs->check_synthetic_pointer) |
| 1464 | return 0; |
| 1465 | return value->location.computed.funcs->check_synthetic_pointer (value, |
| 1466 | offset, |
| 1467 | length); |
| 1468 | } |
| 1469 | |
/* Offset, in addressable memory units, of VALUE's declared type
   within the enclosing type's contents buffer (see
   value_contents_raw).  */

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}

/* Pointed-to offset -- presumably the offset of the full object
   relative to what a pointer value points at; confirm against
   value.h.  */

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}

/* Return the callbacks of a computed lvalue.  V must be
   lval_computed.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the opaque closure of a computed lvalue.  V must be
   lval_computed.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Writable access to VALUE's lval kind (the VALUE_LVAL hack).  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Read-only access to VALUE's lval kind.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
| 1521 | |
| 1522 | CORE_ADDR |
| 1523 | value_address (const struct value *value) |
| 1524 | { |
| 1525 | if (value->lval != lval_memory) |
| 1526 | return 0; |
| 1527 | if (value->parent != NULL) |
| 1528 | return value_address (value->parent.get ()) + value->offset; |
| 1529 | if (NULL != TYPE_DATA_LOCATION (value_type (value))) |
| 1530 | { |
| 1531 | gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value))); |
| 1532 | return TYPE_DATA_LOCATION_ADDR (value_type (value)); |
| 1533 | } |
| 1534 | |
| 1535 | return value->location.address + value->offset; |
| 1536 | } |
| 1537 | |
/* Return VALUE's stored memory address without applying the parent /
   offset / data-location adjustments that value_address makes; 0 if
   VALUE is not in memory.  */

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->lval != lval_memory)
    return 0;
  return value->location.address;
}

/* Set the memory address of VALUE, which must be lval_memory.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}
| 1552 | |
/* Writable access to the internalvar location (deprecated hack).  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

/* Writable access to the frame id of a register value (deprecated
   hack).  VALUE must be lval_register.  */

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}

/* Writable access to the register number of a register value
   (deprecated hack).  VALUE must be lval_register.  */

int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}

/* Return non-zero if VALUE may be modified (history values, for
   instance, are marked non-modifiable by record_latest_value).  */

int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
| 1578 | \f |
| 1579 | /* Return a mark in the value chain. All values allocated after the |
| 1580 | mark is obtained (except for those released) are subject to being freed |
| 1581 | if a subsequent value_free_to_mark is passed the mark. */ |
| 1582 | struct value * |
| 1583 | value_mark (void) |
| 1584 | { |
| 1585 | if (all_values.empty ()) |
| 1586 | return nullptr; |
| 1587 | return all_values.back ().get (); |
| 1588 | } |
| 1589 | |
/* See value.h.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}

/* Release a reference to VAL, which was acquired with value_incref.
   This function is also called to deallocate values from the value
   chain.  VAL may be NULL, in which case this is a no-op; the value
   is destroyed when its reference count drops to zero.  */

void
value_decref (struct value *val)
{
  if (val != nullptr)
    {
      gdb_assert (val->reference_count > 0);
      val->reference_count--;
      if (val->reference_count == 0)
	delete val;
    }
}
| 1613 | |
| 1614 | /* Free all values allocated since MARK was obtained by value_mark |
| 1615 | (except for those released). */ |
| 1616 | void |
| 1617 | value_free_to_mark (const struct value *mark) |
| 1618 | { |
| 1619 | auto iter = std::find (all_values.begin (), all_values.end (), mark); |
| 1620 | if (iter == all_values.end ()) |
| 1621 | all_values.clear (); |
| 1622 | else |
| 1623 | all_values.erase (iter + 1, all_values.end ()); |
| 1624 | } |
| 1625 | |
| 1626 | /* Remove VAL from the chain all_values |
| 1627 | so it will not be freed automatically. */ |
| 1628 | |
| 1629 | value_ref_ptr |
| 1630 | release_value (struct value *val) |
| 1631 | { |
| 1632 | if (val == nullptr) |
| 1633 | return value_ref_ptr (); |
| 1634 | |
| 1635 | std::vector<value_ref_ptr>::reverse_iterator iter; |
| 1636 | for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter) |
| 1637 | { |
| 1638 | if (*iter == val) |
| 1639 | { |
| 1640 | value_ref_ptr result = *iter; |
| 1641 | all_values.erase (iter.base () - 1); |
| 1642 | return result; |
| 1643 | } |
| 1644 | } |
| 1645 | |
| 1646 | /* We must always return an owned reference. Normally this happens |
| 1647 | because we transfer the reference from the value chain, but in |
| 1648 | this case the value was not on the chain. */ |
| 1649 | return value_ref_ptr::new_reference (val); |
| 1650 | } |
| 1651 | |
/* See value.h.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  /* If MARK is not on the chain, release everything.  */
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      /* Move ownership of everything after MARK into RESULT, then
	 drop the (now empty) slots from the chain.  */
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* The chain is oldest-first; reverse so RESULT is newest-first --
     presumably the order callers expect; confirm against value.h.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
| 1670 | |
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Laziness is preserved: a lazy source produces a lazy copy whose
     contents buffer is not allocated yet.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  /* Only a non-lazy copy has contents to duplicate.  */
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  val->unavailable = arg->unavailable;
  val->optimized_out = arg->optimized_out;
  val->parent = arg->parent;
  /* Computed lvalues may need a deep copy of their closure.  */
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
| 1713 | |
| 1714 | /* Return a "const" and/or "volatile" qualified version of the value V. |
| 1715 | If CNST is true, then the returned value will be qualified with |
| 1716 | "const". |
| 1717 | if VOLTL is true, then the returned value will be qualified with |
| 1718 | "volatile". */ |
| 1719 | |
| 1720 | struct value * |
| 1721 | make_cv_value (int cnst, int voltl, struct value *v) |
| 1722 | { |
| 1723 | struct type *val_type = value_type (v); |
| 1724 | struct type *enclosing_type = value_enclosing_type (v); |
| 1725 | struct value *cv_val = value_copy (v); |
| 1726 | |
| 1727 | deprecated_set_value_type (cv_val, |
| 1728 | make_cv_type (cnst, voltl, val_type, NULL)); |
| 1729 | set_value_enclosing_type (cv_val, |
| 1730 | make_cv_type (cnst, voltl, enclosing_type, NULL)); |
| 1731 | |
| 1732 | return cv_val; |
| 1733 | } |
| 1734 | |
| 1735 | /* Return a version of ARG that is non-lvalue. */ |
| 1736 | |
| 1737 | struct value * |
| 1738 | value_non_lval (struct value *arg) |
| 1739 | { |
| 1740 | if (VALUE_LVAL (arg) != not_lval) |
| 1741 | { |
| 1742 | struct type *enc_type = value_enclosing_type (arg); |
| 1743 | struct value *val = allocate_value (enc_type); |
| 1744 | |
| 1745 | memcpy (value_contents_all_raw (val), value_contents_all (arg), |
| 1746 | TYPE_LENGTH (enc_type)); |
| 1747 | val->type = arg->type; |
| 1748 | set_value_embedded_offset (val, value_embedded_offset (arg)); |
| 1749 | set_value_pointed_to_offset (val, value_pointed_to_offset (arg)); |
| 1750 | return val; |
| 1751 | } |
| 1752 | return arg; |
| 1753 | } |
| 1754 | |
| 1755 | /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */ |
| 1756 | |
| 1757 | void |
| 1758 | value_force_lval (struct value *v, CORE_ADDR addr) |
| 1759 | { |
| 1760 | gdb_assert (VALUE_LVAL (v) == not_lval); |
| 1761 | |
| 1762 | write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v))); |
| 1763 | v->lval = lval_memory; |
| 1764 | v->location.address = addr; |
| 1765 | } |
| 1766 | |
/* Set COMPONENT's location (and lval kind) from that of WHOLE, of
   which COMPONENT is a sub-value.  Computed closures are copied when
   the funcs provide copy_closure; constant (PROP_CONST) dynamic data
   locations override the component's address.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = value_type (component);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    {
      /* If the COMPONENT has a dynamic location, and is an
	 lval_internalvar_component, then we change it to a lval_memory.

	 Usually a component of an internalvar is created non-lazy, and has
	 its content immediately copied from the parent internalvar.
	 However, for components with a dynamic location, the content of
	 the component is not contained within the parent, but is instead
	 accessed indirectly.  Further, the component will be created as a
	 lazy value.

	 By changing the type of the component to lval_memory we ensure
	 that value_fetch_lazy can successfully load the component.

	 This solution isn't ideal, but a real fix would require values to
	 carry around both the parent value contents, and the contents of
	 any dynamic fields within the parent.  This is a substantial
	 change to how values work in GDB.  */
      if (VALUE_LVAL (component) == lval_internalvar_component)
	{
	  gdb_assert (value_lazy (component));
	  VALUE_LVAL (component) = lval_memory;
	}
      else
	gdb_assert (VALUE_LVAL (component) == lval_memory);
      set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
    }
}
| 1829 | |
| 1830 | /* Access to the value history. */ |
| 1831 | |
| 1832 | /* Record a new value in the value history. |
| 1833 | Returns the absolute history index of the entry. */ |
| 1834 | |
int
record_latest_value (struct value *val)
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  value_history.push_back (release_value (val));

  /* After the push, the history size is also VAL's 1-based history
     number, i.e. the N in $N.  */
  return value_history.size ();
}
| 1853 | |
| 1854 | /* Return a copy of the value in the history with sequence number NUM. */ |
| 1855 | |
| 1856 | struct value * |
| 1857 | access_value_history (int num) |
| 1858 | { |
| 1859 | int absnum = num; |
| 1860 | |
| 1861 | if (absnum <= 0) |
| 1862 | absnum += value_history.size (); |
| 1863 | |
| 1864 | if (absnum <= 0) |
| 1865 | { |
| 1866 | if (num == 0) |
| 1867 | error (_("The history is empty.")); |
| 1868 | else if (num == 1) |
| 1869 | error (_("There is only one value in the history.")); |
| 1870 | else |
| 1871 | error (_("History does not go back to $$%d."), -num); |
| 1872 | } |
| 1873 | if (absnum > value_history.size ()) |
| 1874 | error (_("History has not yet reached $%d."), absnum); |
| 1875 | |
| 1876 | absnum--; |
| 1877 | |
| 1878 | return value_copy (value_history[absnum].get ()); |
| 1879 | } |
| 1880 | |
/* Implement the "show values" command: print a window of ten history
   values, remembering the position across calls so that "show values +"
   continues where the previous invocation stopped.  */

static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Window start, persisted across invocations for "show values +".  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	/* Center the ten-value window on <exp>.  */
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
| 1924 | \f |
/* The possible kinds of content an internal (convenience) variable can
   hold.  The data matching each kind lives in union internalvar_data.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
| 1947 | |
/* The data stored in an internal variable.  Which member is valid is
   selected by the variable's enum internalvar_kind.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
| 1984 | |
/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Link in the global list of internal variables.  */
  struct internalvar *next;

  /* Heap-allocated name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the singly-linked list of all internal variables.  */
static struct internalvar *internalvars;
| 2005 | |
| 2006 | /* If the variable does not already exist create it and give it the |
| 2007 | value given. If no value is given then the default is zero. */ |
| 2008 | static void |
| 2009 | init_if_undefined_command (const char* args, int from_tty) |
| 2010 | { |
| 2011 | struct internalvar *intvar = nullptr; |
| 2012 | |
| 2013 | /* Parse the expression - this is taken from set_command(). */ |
| 2014 | expression_up expr = parse_expression (args); |
| 2015 | |
| 2016 | /* Validate the expression. |
| 2017 | Was the expression an assignment? |
| 2018 | Or even an expression at all? */ |
| 2019 | if (expr->first_opcode () != BINOP_ASSIGN) |
| 2020 | error (_("Init-if-undefined requires an assignment expression.")); |
| 2021 | |
| 2022 | /* Extract the variable from the parsed expression. */ |
| 2023 | expr::assign_operation *assign |
| 2024 | = dynamic_cast<expr::assign_operation *> (expr->op.get ()); |
| 2025 | if (assign != nullptr) |
| 2026 | { |
| 2027 | expr::operation *lhs = assign->get_lhs (); |
| 2028 | expr::internalvar_operation *ivarop |
| 2029 | = dynamic_cast<expr::internalvar_operation *> (lhs); |
| 2030 | if (ivarop != nullptr) |
| 2031 | intvar = ivarop->get_internalvar (); |
| 2032 | } |
| 2033 | |
| 2034 | if (intvar == nullptr) |
| 2035 | error (_("The first parameter to init-if-undefined " |
| 2036 | "should be a GDB variable.")); |
| 2037 | |
| 2038 | /* Only evaluate the expression if the lvalue is void. |
| 2039 | This may still fail if the expression is invalid. */ |
| 2040 | if (intvar->kind == INTERNALVAR_VOID) |
| 2041 | evaluate_expression (expr.get ()); |
| 2042 | } |
| 2043 | |
| 2044 | |
| 2045 | /* Look up an internal variable with name NAME. NAME should not |
| 2046 | normally include a dollar sign. |
| 2047 | |
| 2048 | If the specified internal variable does not exist, |
| 2049 | the return value is NULL. */ |
| 2050 | |
| 2051 | struct internalvar * |
| 2052 | lookup_only_internalvar (const char *name) |
| 2053 | { |
| 2054 | struct internalvar *var; |
| 2055 | |
| 2056 | for (var = internalvars; var; var = var->next) |
| 2057 | if (strcmp (var->name, name) == 0) |
| 2058 | return var; |
| 2059 | |
| 2060 | return NULL; |
| 2061 | } |
| 2062 | |
| 2063 | /* Complete NAME by comparing it to the names of internal |
| 2064 | variables. */ |
| 2065 | |
| 2066 | void |
| 2067 | complete_internalvar (completion_tracker &tracker, const char *name) |
| 2068 | { |
| 2069 | struct internalvar *var; |
| 2070 | int len; |
| 2071 | |
| 2072 | len = strlen (name); |
| 2073 | |
| 2074 | for (var = internalvars; var; var = var->next) |
| 2075 | if (strncmp (var->name, name, len) == 0) |
| 2076 | tracker.add_completion (make_unique_xstrdup (var->name)); |
| 2077 | } |
| 2078 | |
| 2079 | /* Create an internal variable with name NAME and with a void value. |
| 2080 | NAME should not normally include a dollar sign. */ |
| 2081 | |
| 2082 | struct internalvar * |
| 2083 | create_internalvar (const char *name) |
| 2084 | { |
| 2085 | struct internalvar *var = XNEW (struct internalvar); |
| 2086 | |
| 2087 | var->name = xstrdup (name); |
| 2088 | var->kind = INTERNALVAR_VOID; |
| 2089 | var->next = internalvars; |
| 2090 | internalvars = var; |
| 2091 | return var; |
| 2092 | } |
| 2093 | |
| 2094 | /* Create an internal variable with name NAME and register FUN as the |
| 2095 | function that value_of_internalvar uses to create a value whenever |
| 2096 | this variable is referenced. NAME should not normally include a |
| 2097 | dollar sign. DATA is passed uninterpreted to FUN when it is |
| 2098 | called. CLEANUP, if not NULL, is called when the internal variable |
| 2099 | is destroyed. It is passed DATA as its only argument. */ |
| 2100 | |
| 2101 | struct internalvar * |
| 2102 | create_internalvar_type_lazy (const char *name, |
| 2103 | const struct internalvar_funcs *funcs, |
| 2104 | void *data) |
| 2105 | { |
| 2106 | struct internalvar *var = create_internalvar (name); |
| 2107 | |
| 2108 | var->kind = INTERNALVAR_MAKE_VALUE; |
| 2109 | var->u.make_value.functions = funcs; |
| 2110 | var->u.make_value.data = data; |
| 2111 | return var; |
| 2112 | } |
| 2113 | |
| 2114 | /* See documentation in value.h. */ |
| 2115 | |
| 2116 | int |
| 2117 | compile_internalvar_to_ax (struct internalvar *var, |
| 2118 | struct agent_expr *expr, |
| 2119 | struct axs_value *value) |
| 2120 | { |
| 2121 | if (var->kind != INTERNALVAR_MAKE_VALUE |
| 2122 | || var->u.make_value.functions->compile_to_ax == NULL) |
| 2123 | return 0; |
| 2124 | |
| 2125 | var->u.make_value.functions->compile_to_ax (var, expr, value, |
| 2126 | var->u.make_value.data); |
| 2127 | return 1; |
| 2128 | } |
| 2129 | |
| 2130 | /* Look up an internal variable with name NAME. NAME should not |
| 2131 | normally include a dollar sign. |
| 2132 | |
| 2133 | If the specified internal variable does not exist, |
| 2134 | one is created, with a void value. */ |
| 2135 | |
| 2136 | struct internalvar * |
| 2137 | lookup_internalvar (const char *name) |
| 2138 | { |
| 2139 | struct internalvar *var; |
| 2140 | |
| 2141 | var = lookup_only_internalvar (name); |
| 2142 | if (var) |
| 2143 | return var; |
| 2144 | |
| 2145 | return create_internalvar (name); |
| 2146 | } |
| 2147 | |
| 2148 | /* Return current value of internal variable VAR. For variables that |
| 2149 | are not inherently typed, use a value type appropriate for GDBARCH. */ |
| 2150 | |
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      /* When the target could not supply the value, produce void
	 rather than failing.  */
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a value reflecting VAR's current contents.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* A NULL type means "use GDBARCH's default int type".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand back a copy, eagerly fetched so that later target state
	 changes cannot alter what the caller sees.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
| 2236 | |
| 2237 | int |
| 2238 | get_internalvar_integer (struct internalvar *var, LONGEST *result) |
| 2239 | { |
| 2240 | if (var->kind == INTERNALVAR_INTEGER) |
| 2241 | { |
| 2242 | *result = var->u.integer.val; |
| 2243 | return 1; |
| 2244 | } |
| 2245 | |
| 2246 | if (var->kind == INTERNALVAR_VALUE) |
| 2247 | { |
| 2248 | struct type *type = check_typedef (value_type (var->u.value)); |
| 2249 | |
| 2250 | if (type->code () == TYPE_CODE_INT) |
| 2251 | { |
| 2252 | *result = value_as_long (var->u.value); |
| 2253 | return 1; |
| 2254 | } |
| 2255 | } |
| 2256 | |
| 2257 | return 0; |
| 2258 | } |
| 2259 | |
| 2260 | static int |
| 2261 | get_internalvar_function (struct internalvar *var, |
| 2262 | struct internal_function **result) |
| 2263 | { |
| 2264 | switch (var->kind) |
| 2265 | { |
| 2266 | case INTERNALVAR_FUNCTION: |
| 2267 | *result = var->u.fn.function; |
| 2268 | return 1; |
| 2269 | |
| 2270 | default: |
| 2271 | return 0; |
| 2272 | } |
| 2273 | } |
| 2274 | |
/* Store NEWVAL into a sub-part of internal variable VAR: at offset
   OFFSET, or, when BITSIZE is non-zero, into the bit-field described
   by BITPOS/BITSIZE.  Only INTERNALVAR_VALUE variables can have
   components.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      /* Bytes per addressable memory unit on this architecture.  */
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      /* NOTE(review): the bit-field branch adds OFFSET unscaled while
	 the memcpy branch scales it by UNIT_SIZE; on targets whose
	 addressable unit is not one byte these disagree -- confirm
	 which interpretation of OFFSET is intended.  */
      if (bitsize)
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
| 2304 | |
/* Assign VAL as the new contents of internal variable VAR, releasing
   whatever VAR held before.  Errors for canonical convenience
   functions, which must not be overwritten.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  /* Canonical convenience functions (installed via
     set_internalvar_function) are protected from assignment.  */
  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (check_typedef (value_type (val))->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = value_copy (val);
      copy->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (copy))
	value_fetch_lazy (copy);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
| 2365 | |
| 2366 | void |
| 2367 | set_internalvar_integer (struct internalvar *var, LONGEST l) |
| 2368 | { |
| 2369 | /* Clean up old contents. */ |
| 2370 | clear_internalvar (var); |
| 2371 | |
| 2372 | var->kind = INTERNALVAR_INTEGER; |
| 2373 | var->u.integer.type = NULL; |
| 2374 | var->u.integer.val = l; |
| 2375 | } |
| 2376 | |
| 2377 | void |
| 2378 | set_internalvar_string (struct internalvar *var, const char *string) |
| 2379 | { |
| 2380 | /* Clean up old contents. */ |
| 2381 | clear_internalvar (var); |
| 2382 | |
| 2383 | var->kind = INTERNALVAR_STRING; |
| 2384 | var->u.string = xstrdup (string); |
| 2385 | } |
| 2386 | |
| 2387 | static void |
| 2388 | set_internalvar_function (struct internalvar *var, struct internal_function *f) |
| 2389 | { |
| 2390 | /* Clean up old contents. */ |
| 2391 | clear_internalvar (var); |
| 2392 | |
| 2393 | var->kind = INTERNALVAR_FUNCTION; |
| 2394 | var->u.fn.function = f; |
| 2395 | var->u.fn.canonical = 1; |
| 2396 | /* Variables installed here are always the canonical version. */ |
| 2397 | } |
| 2398 | |
| 2399 | void |
| 2400 | clear_internalvar (struct internalvar *var) |
| 2401 | { |
| 2402 | /* Clean up old contents. */ |
| 2403 | switch (var->kind) |
| 2404 | { |
| 2405 | case INTERNALVAR_VALUE: |
| 2406 | value_decref (var->u.value); |
| 2407 | break; |
| 2408 | |
| 2409 | case INTERNALVAR_STRING: |
| 2410 | xfree (var->u.string); |
| 2411 | break; |
| 2412 | |
| 2413 | case INTERNALVAR_MAKE_VALUE: |
| 2414 | if (var->u.make_value.functions->destroy != NULL) |
| 2415 | var->u.make_value.functions->destroy (var->u.make_value.data); |
| 2416 | break; |
| 2417 | |
| 2418 | default: |
| 2419 | break; |
| 2420 | } |
| 2421 | |
| 2422 | /* Reset to void kind. */ |
| 2423 | var->kind = INTERNALVAR_VOID; |
| 2424 | } |
| 2425 | |
/* Return the name of internal variable VAR, without the leading '$'.
   The string remains owned by VAR.  */

const char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
| 2431 | |
| 2432 | static struct internal_function * |
| 2433 | create_internal_function (const char *name, |
| 2434 | internal_function_fn handler, void *cookie) |
| 2435 | { |
| 2436 | struct internal_function *ifn = XNEW (struct internal_function); |
| 2437 | |
| 2438 | ifn->name = xstrdup (name); |
| 2439 | ifn->handler = handler; |
| 2440 | ifn->cookie = cookie; |
| 2441 | return ifn; |
| 2442 | } |
| 2443 | |
| 2444 | const char * |
| 2445 | value_internal_function_name (struct value *val) |
| 2446 | { |
| 2447 | struct internal_function *ifn; |
| 2448 | int result; |
| 2449 | |
| 2450 | gdb_assert (VALUE_LVAL (val) == lval_internalvar); |
| 2451 | result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn); |
| 2452 | gdb_assert (result); |
| 2453 | |
| 2454 | return ifn->name; |
| 2455 | } |
| 2456 | |
| 2457 | struct value * |
| 2458 | call_internal_function (struct gdbarch *gdbarch, |
| 2459 | const struct language_defn *language, |
| 2460 | struct value *func, int argc, struct value **argv) |
| 2461 | { |
| 2462 | struct internal_function *ifn; |
| 2463 | int result; |
| 2464 | |
| 2465 | gdb_assert (VALUE_LVAL (func) == lval_internalvar); |
| 2466 | result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn); |
| 2467 | gdb_assert (result); |
| 2468 | |
| 2469 | return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv); |
| 2470 | } |
| 2471 | |
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */
static void
function_command (const char *command, int from_tty)
{
  /* Do nothing; COMMAND and FROM_TTY are deliberately ignored.  */
}
| 2481 | |
| 2482 | /* Helper function that does the work for add_internal_function. */ |
| 2483 | |
| 2484 | static struct cmd_list_element * |
| 2485 | do_add_internal_function (const char *name, const char *doc, |
| 2486 | internal_function_fn handler, void *cookie) |
| 2487 | { |
| 2488 | struct internal_function *ifn; |
| 2489 | struct internalvar *var = lookup_internalvar (name); |
| 2490 | |
| 2491 | ifn = create_internal_function (name, handler, cookie); |
| 2492 | set_internalvar_function (var, ifn); |
| 2493 | |
| 2494 | return add_cmd (name, no_class, function_command, doc, &functionlist); |
| 2495 | } |
| 2496 | |
/* See value.h.  Register convenience function NAME with help text DOC;
   HANDLER is invoked with COOKIE on each call.  */

void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  /* The returned help-command element is not needed here.  */
  do_add_internal_function (name, doc, handler, cookie);
}
| 2505 | |
| 2506 | /* See value.h. */ |
| 2507 | |
| 2508 | void |
| 2509 | add_internal_function (gdb::unique_xmalloc_ptr<char> &&name, |
| 2510 | gdb::unique_xmalloc_ptr<char> &&doc, |
| 2511 | internal_function_fn handler, void *cookie) |
| 2512 | { |
| 2513 | struct cmd_list_element *cmd |
| 2514 | = do_add_internal_function (name.get (), doc.get (), handler, cookie); |
| 2515 | doc.release (); |
| 2516 | cmd->doc_allocated = 1; |
| 2517 | name.release (); |
| 2518 | cmd->name_allocated = 1; |
| 2519 | } |
| 2520 | |
| 2521 | /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to |
| 2522 | prevent cycles / duplicates. */ |
| 2523 | |
| 2524 | void |
| 2525 | preserve_one_value (struct value *value, struct objfile *objfile, |
| 2526 | htab_t copied_types) |
| 2527 | { |
| 2528 | if (value->type->objfile_owner () == objfile) |
| 2529 | value->type = copy_type_recursive (objfile, value->type, copied_types); |
| 2530 | |
| 2531 | if (value->enclosing_type->objfile_owner () == objfile) |
| 2532 | value->enclosing_type = copy_type_recursive (objfile, |
| 2533 | value->enclosing_type, |
| 2534 | copied_types); |
| 2535 | } |
| 2536 | |
| 2537 | /* Likewise for internal variable VAR. */ |
| 2538 | |
| 2539 | static void |
| 2540 | preserve_one_internalvar (struct internalvar *var, struct objfile *objfile, |
| 2541 | htab_t copied_types) |
| 2542 | { |
| 2543 | switch (var->kind) |
| 2544 | { |
| 2545 | case INTERNALVAR_INTEGER: |
| 2546 | if (var->u.integer.type |
| 2547 | && var->u.integer.type->objfile_owner () == objfile) |
| 2548 | var->u.integer.type |
| 2549 | = copy_type_recursive (objfile, var->u.integer.type, copied_types); |
| 2550 | break; |
| 2551 | |
| 2552 | case INTERNALVAR_VALUE: |
| 2553 | preserve_one_value (var->u.value, objfile, copied_types); |
| 2554 | break; |
| 2555 | } |
| 2556 | } |
| 2557 | |
| 2558 | /* Update the internal variables and value history when OBJFILE is |
| 2559 | discarded; we must copy the types out of the objfile. New global types |
| 2560 | will be created for every convenience variable which currently points to |
| 2561 | this objfile's types, and the convenience variables will be adjusted to |
| 2562 | use the new global types. */ |
| 2563 | |
| 2564 | void |
| 2565 | preserve_values (struct objfile *objfile) |
| 2566 | { |
| 2567 | struct internalvar *var; |
| 2568 | |
| 2569 | /* Create the hash table. We allocate on the objfile's obstack, since |
| 2570 | it is soon to be deleted. */ |
| 2571 | htab_up copied_types = create_copied_types_hash (objfile); |
| 2572 | |
| 2573 | for (const value_ref_ptr &item : value_history) |
| 2574 | preserve_one_value (item.get (), objfile, copied_types.get ()); |
| 2575 | |
| 2576 | for (var = internalvars; var; var = var->next) |
| 2577 | preserve_one_internalvar (var, objfile, copied_types.get ()); |
| 2578 | |
| 2579 | preserve_ext_lang_values (objfile, copied_types.get ()); |
| 2580 | } |
| 2581 | |
/* Implement the "show convenience" command: print every internal
   (convenience) variable and its current value.  */

static void
show_convenience (const char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      /* Remember that at least one variable was printed; this controls
	 the "no variables" message below.  */
      if (!varseen)
	{
	  varseen = 1;
	}
      printf_filtered (("$%s = "), var->name);

      /* Evaluating a variable can fail (e.g. a MAKE_VALUE callback may
	 throw); report the error inline rather than aborting the
	 listing.  */
      try
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      catch (const gdb_exception_error &ex)
	{
	  fprintf_styled (gdb_stdout, metadata_style.style (),
			  _("<error: %s>"), ex.what ());
	}

      printf_filtered (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      printf_unfiltered (_("No debugger convenience variables now defined.\n"
			   "Convenience variables have "
			   "names starting with \"$\";\n"
			   "use \"set\" as in \"set "
			   "$foo = 5\" to define them.\n"));
    }
}
| 2628 | \f |
| 2629 | |
| 2630 | /* See value.h. */ |
| 2631 | |
| 2632 | struct value * |
| 2633 | value_from_xmethod (xmethod_worker_up &&worker) |
| 2634 | { |
| 2635 | struct value *v; |
| 2636 | |
| 2637 | v = allocate_value (builtin_type (target_gdbarch ())->xmethod); |
| 2638 | v->lval = lval_xcallable; |
| 2639 | v->location.xm_worker = worker.release (); |
| 2640 | v->modifiable = 0; |
| 2641 | |
| 2642 | return v; |
| 2643 | } |
| 2644 | |
/* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.
   ARGV holds the call's arguments; the worker receives ARGV[0]
   separately from the remaining arguments.  */

struct type *
result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  /* METHOD must be an xmethod value, and at least ARGV[0] must be
     supplied.  */
  gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && !argv.empty ());

  return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
}
| 2655 | |
/* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.
   ARGV holds the call's arguments; the worker receives ARGV[0]
   separately from the remaining arguments.  */

struct value *
call_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  /* METHOD must be an xmethod value, and at least ARGV[0] must be
     supplied.  */
  gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && !argv.empty ());

  return method->location.xm_worker->invoke (argv[0], argv.slice (1));
}
| 2666 | \f |
/* Extract a value as a C number (either long or double).
   Knows how to convert fixed values to double, or
   floating values to long.
   Does not deallocate the value.  */

LONGEST
value_as_long (struct value *val)
{
  /* This coerces arrays and functions, which is necessary (e.g.
     in disassemble_command).  It also dereferences references, which
     I suspect is the most logical thing to do.  */
  val = coerce_array (val);
  /* unpack_long performs the actual conversion of the (possibly
     coerced) contents into a LONGEST.  */
  return unpack_long (value_type (val), value_contents (val));
}
| 2681 | |
| 2682 | /* Extract a value as a C pointer. Does not deallocate the value. |
| 2683 | Note that val's type may not actually be a pointer; value_as_long |
| 2684 | handles all the cases. */ |
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = value_type (val)->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
     whether we want this to be true eventually. */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant. */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point. For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code. The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own. On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function. This is what
     you'll get if you evaluate an expression like `main'. The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'. This is the challenging conversion
     discussed above. Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor. The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess. If VAL is a
     function, just return its address directly. */
  if (value_type (val)->code () == TYPE_CODE_FUNC
      || value_type (val)->code () == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space. For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address. Just assume such architectures handle all
     integer conversions in a single function. */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does. When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed. Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully. In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else. The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it. */

  /* Non-pointer values on an architecture with a special
     integer-to-address conversion are routed through that hook.  */
  if (value_type (val)->code () != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (value_type (val))
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Otherwise, decode VAL's contents as an ordinary integer or
     pointer value.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
| 2788 | \f |
| 2789 | /* Unpack raw data (copied from debugee, target byte order) at VALADDR |
| 2790 | as a long, or as a double, assuming the raw data is described |
| 2791 | by type TYPE. Knows how to convert different sizes of values |
| 2792 | and can convert between fixed and floating point. We don't assume |
| 2793 | any alignment for the raw data. Return value is in host byte order. |
| 2794 | |
| 2795 | If you want functions and arrays to be coerced to pointers, and |
| 2796 | references to be dereferenced, call value_as_long() instead. |
| 2797 | |
| 2798 | C++: It is assumed that the front-end has taken care of |
| 2799 | all matters concerning pointers to members. A pointer |
| 2800 | to member which reaches here is considered to be equivalent |
| 2801 | to an INT (or some size). After all, it is only an offset. */ |
| 2802 | |
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* For fixed-point types, unpack via the underlying fixed-point
     base type; the TYPE_CODE_FIXED_POINT case below then applies the
     scaling factor.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = TYPE_LENGTH (type);
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	/* Types that declare fewer value bits than their storage
	   length must be extracted bit-wise rather than byte-wise.  */
	if (type->bit_size_differs_p ())
	  {
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here. */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types may store their values with a bias; undo it so
	   the caller sees the logical value.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	/* Read the raw bytes as a scaled rational, then truncate
	   toward zero (mpz_tdiv_q) to get an integer.  */
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz;
	mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
	 whether we want this to be true eventually. */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
| 2880 | |
| 2881 | /* Unpack raw data (copied from debugee, target byte order) at VALADDR |
| 2882 | as a CORE_ADDR, assuming the raw data is described by type TYPE. |
| 2883 | We don't assume any alignment for the raw data. Return value is in |
| 2884 | host byte order. |
| 2885 | |
| 2886 | If you want functions and arrays to be coerced to pointers, and |
| 2887 | references to be dereferenced, call value_as_address() instead. |
| 2888 | |
| 2889 | C++: It is assumed that the front-end has taken care of |
| 2890 | all matters concerning pointers to members. A pointer |
| 2891 | to member which reaches here is considered to be equivalent |
| 2892 | to an INT (or some size). After all, it is only an offset. */ |
| 2893 | |
| 2894 | CORE_ADDR |
| 2895 | unpack_pointer (struct type *type, const gdb_byte *valaddr) |
| 2896 | { |
| 2897 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure |
| 2898 | whether we want this to be true eventually. */ |
| 2899 | return unpack_long (type, valaddr); |
| 2900 | } |
| 2901 | |
| 2902 | bool |
| 2903 | is_floating_value (struct value *val) |
| 2904 | { |
| 2905 | struct type *type = check_typedef (value_type (val)); |
| 2906 | |
| 2907 | if (is_floating_type (type)) |
| 2908 | { |
| 2909 | if (!target_float_is_valid (value_contents (val), type)) |
| 2910 | error (_("Invalid floating value found in program.")); |
| 2911 | return true; |
| 2912 | } |
| 2913 | |
| 2914 | return false; |
| 2915 | } |
| 2916 | |
| 2917 | \f |
| 2918 | /* Get the value of the FIELDNO'th field (which must be static) of |
| 2919 | TYPE. */ |
| 2920 | |
struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the member's address directly; read
	 the value lazily from there.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only a physical (mangled) name was recorded; look it up.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols. */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);
	  struct type *field_type = type->field (fieldno).type ();

	  /* If even the minimal symbol is missing, the member's
	     storage is unknown; report it as optimized out.  */
	  if (!msym.minsym)
	    retval = allocate_optimized_out_value (field_type);
	  else
	    retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
| 2961 | |
| 2962 | /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE. |
| 2963 | You have to be careful here, since the size of the data area for the value |
| 2964 | is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger |
| 2965 | than the old enclosing type, you have to allocate more space for the |
| 2966 | data. */ |
| 2967 | |
| 2968 | void |
| 2969 | set_value_enclosing_type (struct value *val, struct type *new_encl_type) |
| 2970 | { |
| 2971 | if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val))) |
| 2972 | { |
| 2973 | check_type_length_before_alloc (new_encl_type); |
| 2974 | val->contents |
| 2975 | .reset ((gdb_byte *) xrealloc (val->contents.release (), |
| 2976 | TYPE_LENGTH (new_encl_type))); |
| 2977 | } |
| 2978 | |
| 2979 | val->enclosing_type = new_encl_type; |
| 2980 | } |
| 2981 | |
| 2982 | /* Given a value ARG1 (offset by OFFSET bytes) |
| 2983 | of a struct or union type ARG_TYPE, |
| 2984 | extract and return the value of one of its (non-static) fields. |
| 2985 | FIELDNO says which field. */ |
| 2986 | |
struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero. However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly. */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set. If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit. Assume that the address, offset, and embedded offset
	 are sufficiently aligned. */

      LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      LONGEST container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* If the parent's contents are already fetched, fetch the
	 bitfield's bits now; otherwise it stays lazy like its
	 parent.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc. */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values. */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member. */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location. */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value. */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported. */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The field value shares ARG1's location (memory, register, ...).  */
  set_value_component_location (v, arg1);
  return v;
}
| 3106 | |
| 3107 | /* Given a value ARG1 of a struct or union type, |
| 3108 | extract and return the value of one of its (non-static) fields. |
| 3109 | FIELDNO says which field. */ |
| 3110 | |
| 3111 | struct value * |
| 3112 | value_field (struct value *arg1, int fieldno) |
| 3113 | { |
| 3114 | return value_primitive_field (arg1, 0, fieldno, value_type (arg1)); |
| 3115 | } |
| 3116 | |
| 3117 | /* Return a non-virtual function as a value. |
| 3118 | F is the list of member functions which contains the desired method. |
| 3119 | J is an index into F which provides the desired method. |
| 3120 | |
| 3121 | We only use the symbol for its address, so be happy with either a |
| 3122 | full symbol or a minimal symbol. */ |
| 3123 | |
| 3124 | struct value * |
| 3125 | value_fn_field (struct value **arg1p, struct fn_field *f, |
| 3126 | int j, struct type *type, |
| 3127 | LONGEST offset) |
| 3128 | { |
| 3129 | struct value *v; |
| 3130 | struct type *ftype = TYPE_FN_FIELD_TYPE (f, j); |
| 3131 | const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j); |
| 3132 | struct symbol *sym; |
| 3133 | struct bound_minimal_symbol msym; |
| 3134 | |
| 3135 | sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol; |
| 3136 | if (sym != NULL) |
| 3137 | { |
| 3138 | memset (&msym, 0, sizeof (msym)); |
| 3139 | } |
| 3140 | else |
| 3141 | { |
| 3142 | gdb_assert (sym == NULL); |
| 3143 | msym = lookup_bound_minimal_symbol (physname); |
| 3144 | if (msym.minsym == NULL) |
| 3145 | return NULL; |
| 3146 | } |
| 3147 | |
| 3148 | v = allocate_value (ftype); |
| 3149 | VALUE_LVAL (v) = lval_memory; |
| 3150 | if (sym) |
| 3151 | { |
| 3152 | set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym))); |
| 3153 | } |
| 3154 | else |
| 3155 | { |
| 3156 | /* The minimal symbol might point to a function descriptor; |
| 3157 | resolve it to the actual code address instead. */ |
| 3158 | struct objfile *objfile = msym.objfile; |
| 3159 | struct gdbarch *gdbarch = objfile->arch (); |
| 3160 | |
| 3161 | set_value_address (v, |
| 3162 | gdbarch_convert_from_func_ptr_addr |
| 3163 | (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), |
| 3164 | current_inferior ()->top_target ())); |
| 3165 | } |
| 3166 | |
| 3167 | if (arg1p) |
| 3168 | { |
| 3169 | if (type != value_type (*arg1p)) |
| 3170 | *arg1p = value_ind (value_cast (lookup_pointer_type (type), |
| 3171 | value_addr (*arg1p))); |
| 3172 | |
| 3173 | /* Move the `this' pointer according to the offset. |
| 3174 | VALUE_OFFSET (*arg1p) += offset; */ |
| 3175 | } |
| 3176 | |
| 3177 | return v; |
| 3178 | } |
| 3179 | |
| 3180 | \f |
| 3181 | |
| 3182 | /* See value.h. */ |
| 3183 | |
LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST. */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* A zero BITSIZE means "not a bitfield": read the whole type.  */
      bytes_read = TYPE_LENGTH (field_type);
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Shift the word so the field's least significant bit lands at bit
     0 of VAL; which end that starts from depends on the target's
     byte order.  */

  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend. */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  /* Test the field's top (sign) bit; if set, sign extend.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
| 3237 | |
| 3238 | /* Unpack a field FIELDNO of the specified TYPE, from the object at |
| 3239 | VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of |
| 3240 | ORIGINAL_VALUE, which must not be NULL. See |
| 3241 | unpack_value_bits_as_long for more details. */ |
| 3242 | |
| 3243 | int |
| 3244 | unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr, |
| 3245 | LONGEST embedded_offset, int fieldno, |
| 3246 | const struct value *val, LONGEST *result) |
| 3247 | { |
| 3248 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
| 3249 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); |
| 3250 | struct type *field_type = type->field (fieldno).type (); |
| 3251 | int bit_offset; |
| 3252 | |
| 3253 | gdb_assert (val != NULL); |
| 3254 | |
| 3255 | bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; |
| 3256 | if (value_bits_any_optimized_out (val, bit_offset, bitsize) |
| 3257 | || !value_bits_available (val, bit_offset, bitsize)) |
| 3258 | return 0; |
| 3259 | |
| 3260 | *result = unpack_bits_as_long (field_type, valaddr + embedded_offset, |
| 3261 | bitpos, bitsize); |
| 3262 | return 1; |
| 3263 | } |
| 3264 | |
| 3265 | /* Unpack a field FIELDNO of the specified TYPE, from the anonymous |
| 3266 | object at VALADDR. See unpack_bits_as_long for more details. */ |
| 3267 | |
| 3268 | LONGEST |
| 3269 | unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno) |
| 3270 | { |
| 3271 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
| 3272 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); |
| 3273 | struct type *field_type = type->field (fieldno).type (); |
| 3274 | |
| 3275 | return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize); |
| 3276 | } |
| 3277 | |
| 3278 | /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at |
| 3279 | VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store |
| 3280 | the contents in DEST_VAL, zero or sign extending if the type of |
| 3281 | DEST_VAL is wider than BITSIZE. VALADDR points to the contents of |
| 3282 | VAL. If the VAL's contents required to extract the bitfield from |
| 3283 | are unavailable/optimized out, DEST_VAL is correspondingly |
| 3284 | marked unavailable/optimized out. */ |
| 3285 | |
void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = value_type (dest_val);

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid. Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below. If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though. See allocate_optimized_out_value. */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val),
			    TYPE_LENGTH (field_type), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits. */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  /* On big-endian targets the significant bits sit at the high end
     of DEST_VAL's buffer; on little-endian they start at bit 0.  */
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
| 3324 | |
| 3325 | /* Return a new value with type TYPE, which is FIELDNO field of the |
| 3326 | object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents |
| 3327 | of VAL. If the VAL's contents required to extract the bitfield |
| 3328 | from are unavailable/optimized out, the new value is |
| 3329 | correspondingly marked unavailable/optimized out. */ |
| 3330 | |
| 3331 | struct value * |
| 3332 | value_field_bitfield (struct type *type, int fieldno, |
| 3333 | const gdb_byte *valaddr, |
| 3334 | LONGEST embedded_offset, const struct value *val) |
| 3335 | { |
| 3336 | int bitpos = TYPE_FIELD_BITPOS (type, fieldno); |
| 3337 | int bitsize = TYPE_FIELD_BITSIZE (type, fieldno); |
| 3338 | struct value *res_val = allocate_value (type->field (fieldno).type ()); |
| 3339 | |
| 3340 | unpack_value_bitfield (res_val, bitpos, bitsize, |
| 3341 | valaddr, embedded_offset, val); |
| 3342 | |
| 3343 | return res_val; |
| 3344 | } |
| 3345 | |
| 3346 | /* Modify the value of a bitfield. ADDR points to a block of memory in |
| 3347 | target byte order; the bitfield starts in the byte pointed to. FIELDVAL |
| 3348 | is the desired value of the field, in host byte order. BITPOS and BITSIZE |
| 3349 | indicate which bits (in target bit order) comprise the bitfield. |
| 3350 | Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and |
| 3351 | 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */ |
| 3352 | |
void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  /* MASK has the field's low BITSIZE bits set.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS. */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits. */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question. */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest. */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted. */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports. */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine. */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then merge in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
| 3397 | \f |
| 3398 | /* Pack NUM into BUF using a target format of TYPE. */ |
| 3399 | |
void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  LONGEST len;

  type = check_typedef (type);
  len = TYPE_LENGTH (type);

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types may store values with a bias; apply it here (the
	 inverse of what unpack_long does when reading).  */
      num -= type->bounds ()->bias;
      /* Fall through. */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      /* Scalars whose declared bit size differs from their storage
	 size are masked and shifted into position before storing.  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      /* Pointers and references are stored as typed addresses.  */
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      /* Convert the integer into the target's float format.  */
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
| 3446 | |
| 3447 | |
| 3448 | /* Pack NUM into BUF using a target format of TYPE. */ |
| 3449 | |
| 3450 | static void |
| 3451 | pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) |
| 3452 | { |
| 3453 | LONGEST len; |
| 3454 | enum bfd_endian byte_order; |
| 3455 | |
| 3456 | type = check_typedef (type); |
| 3457 | len = TYPE_LENGTH (type); |
| 3458 | byte_order = type_byte_order (type); |
| 3459 | |
| 3460 | switch (type->code ()) |
| 3461 | { |
| 3462 | case TYPE_CODE_INT: |
| 3463 | case TYPE_CODE_CHAR: |
| 3464 | case TYPE_CODE_ENUM: |
| 3465 | case TYPE_CODE_FLAGS: |
| 3466 | case TYPE_CODE_BOOL: |
| 3467 | case TYPE_CODE_RANGE: |
| 3468 | case TYPE_CODE_MEMBERPTR: |
| 3469 | if (type->bit_size_differs_p ()) |
| 3470 | { |
| 3471 | unsigned bit_off = type->bit_offset (); |
| 3472 | unsigned bit_size = type->bit_size (); |
| 3473 | num &= ((ULONGEST) 1 << bit_size) - 1; |
| 3474 | num <<= bit_off; |
| 3475 | } |
| 3476 | store_unsigned_integer (buf, len, byte_order, num); |
| 3477 | break; |
| 3478 | |
| 3479 | case TYPE_CODE_REF: |
| 3480 | case TYPE_CODE_RVALUE_REF: |
| 3481 | case TYPE_CODE_PTR: |
| 3482 | store_typed_address (buf, type, (CORE_ADDR) num); |
| 3483 | break; |
| 3484 | |
| 3485 | case TYPE_CODE_FLT: |
| 3486 | case TYPE_CODE_DECFLOAT: |
| 3487 | target_float_from_ulongest (buf, type, num); |
| 3488 | break; |
| 3489 | |
| 3490 | default: |
| 3491 | error (_("Unexpected type (%d) encountered " |
| 3492 | "for unsigned integer constant."), |
| 3493 | type->code ()); |
| 3494 | } |
| 3495 | } |
| 3496 | |
| 3497 | |
| 3498 | /* Convert C numbers into newly allocated values. */ |
| 3499 | |
| 3500 | struct value * |
| 3501 | value_from_longest (struct type *type, LONGEST num) |
| 3502 | { |
| 3503 | struct value *val = allocate_value (type); |
| 3504 | |
| 3505 | pack_long (value_contents_raw (val), type, num); |
| 3506 | return val; |
| 3507 | } |
| 3508 | |
| 3509 | |
| 3510 | /* Convert C unsigned numbers into newly allocated values. */ |
| 3511 | |
| 3512 | struct value * |
| 3513 | value_from_ulongest (struct type *type, ULONGEST num) |
| 3514 | { |
| 3515 | struct value *val = allocate_value (type); |
| 3516 | |
| 3517 | pack_unsigned_long (value_contents_raw (val), type, num); |
| 3518 | |
| 3519 | return val; |
| 3520 | } |
| 3521 | |
| 3522 | |
| 3523 | /* Create a value representing a pointer of type TYPE to the address |
| 3524 | ADDR. */ |
| 3525 | |
| 3526 | struct value * |
| 3527 | value_from_pointer (struct type *type, CORE_ADDR addr) |
| 3528 | { |
| 3529 | struct value *val = allocate_value (type); |
| 3530 | |
| 3531 | store_typed_address (value_contents_raw (val), |
| 3532 | check_typedef (type), addr); |
| 3533 | return val; |
| 3534 | } |
| 3535 | |
| 3536 | /* Create and return a value object of TYPE containing the value D. The |
| 3537 | TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once |
| 3538 | it is converted to target format. */ |
| 3539 | |
| 3540 | struct value * |
| 3541 | value_from_host_double (struct type *type, double d) |
| 3542 | { |
| 3543 | struct value *value = allocate_value (type); |
| 3544 | gdb_assert (type->code () == TYPE_CODE_FLT); |
| 3545 | target_float_from_host_double (value_contents_raw (value), |
| 3546 | value_type (value), d); |
| 3547 | return value; |
| 3548 | } |
| 3549 | |
| 3550 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
| 3551 | is non-null, and whose memory address (in the inferior) is |
| 3552 | ADDRESS. The type of the created value may differ from the passed |
| 3553 | type TYPE. Make sure to retrieve values new type after this call. |
| 3554 | Note that TYPE is not passed through resolve_dynamic_type; this is |
| 3555 | a special API intended for use only by Ada. */ |
| 3556 | |
| 3557 | struct value * |
| 3558 | value_from_contents_and_address_unresolved (struct type *type, |
| 3559 | const gdb_byte *valaddr, |
| 3560 | CORE_ADDR address) |
| 3561 | { |
| 3562 | struct value *v; |
| 3563 | |
| 3564 | if (valaddr == NULL) |
| 3565 | v = allocate_value_lazy (type); |
| 3566 | else |
| 3567 | v = value_from_contents (type, valaddr); |
| 3568 | VALUE_LVAL (v) = lval_memory; |
| 3569 | set_value_address (v, address); |
| 3570 | return v; |
| 3571 | } |
| 3572 | |
| 3573 | /* Create a value of type TYPE whose contents come from VALADDR, if it |
| 3574 | is non-null, and whose memory address (in the inferior) is |
| 3575 | ADDRESS. The type of the created value may differ from the passed |
| 3576 | type TYPE. Make sure to retrieve values new type after this call. */ |
| 3577 | |
struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address)
{
  /* Only wrap VALADDR in a view when it is non-null; an empty view
     tells resolve_dynamic_type that no contents are available.  */
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
  /* Resolve any dynamic properties of TYPE against the contents and
     address before creating the value, so the value carries the
     resolved type.  */
  struct type *resolved_type = resolve_dynamic_type (type, view, address);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);
  struct value *v;

  if (valaddr == NULL)
    v = allocate_value_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* If the resolved type carries a constant data-location property,
     it overrides the caller-supplied ADDRESS.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  VALUE_LVAL (v) = lval_memory;
  set_value_address (v, address);
  return v;
}
| 3601 | |
| 3602 | /* Create a value of type TYPE holding the contents CONTENTS. |
| 3603 | The new value is `not_lval'. */ |
| 3604 | |
| 3605 | struct value * |
| 3606 | value_from_contents (struct type *type, const gdb_byte *contents) |
| 3607 | { |
| 3608 | struct value *result; |
| 3609 | |
| 3610 | result = allocate_value (type); |
| 3611 | memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type)); |
| 3612 | return result; |
| 3613 | } |
| 3614 | |
| 3615 | /* Extract a value from the history file. Input will be of the form |
| 3616 | $digits or $$digits. See block comment above 'write_dollar_variable' |
| 3617 | for details. */ |
| 3618 | |
| 3619 | struct value * |
| 3620 | value_from_history_ref (const char *h, const char **endp) |
| 3621 | { |
| 3622 | int index, len; |
| 3623 | |
| 3624 | if (h[0] == '$') |
| 3625 | len = 1; |
| 3626 | else |
| 3627 | return NULL; |
| 3628 | |
| 3629 | if (h[1] == '$') |
| 3630 | len = 2; |
| 3631 | |
| 3632 | /* Find length of numeral string. */ |
| 3633 | for (; isdigit (h[len]); len++) |
| 3634 | ; |
| 3635 | |
| 3636 | /* Make sure numeral string is not part of an identifier. */ |
| 3637 | if (h[len] == '_' || isalpha (h[len])) |
| 3638 | return NULL; |
| 3639 | |
| 3640 | /* Now collect the index value. */ |
| 3641 | if (h[1] == '$') |
| 3642 | { |
| 3643 | if (len == 2) |
| 3644 | { |
| 3645 | /* For some bizarre reason, "$$" is equivalent to "$$1", |
| 3646 | rather than to "$$0" as it ought to be! */ |
| 3647 | index = -1; |
| 3648 | *endp += len; |
| 3649 | } |
| 3650 | else |
| 3651 | { |
| 3652 | char *local_end; |
| 3653 | |
| 3654 | index = -strtol (&h[2], &local_end, 10); |
| 3655 | *endp = local_end; |
| 3656 | } |
| 3657 | } |
| 3658 | else |
| 3659 | { |
| 3660 | if (len == 1) |
| 3661 | { |
| 3662 | /* "$" is equivalent to "$0". */ |
| 3663 | index = 0; |
| 3664 | *endp += len; |
| 3665 | } |
| 3666 | else |
| 3667 | { |
| 3668 | char *local_end; |
| 3669 | |
| 3670 | index = strtol (&h[1], &local_end, 10); |
| 3671 | *endp = local_end; |
| 3672 | } |
| 3673 | } |
| 3674 | |
| 3675 | return access_value_history (index); |
| 3676 | } |
| 3677 | |
| 3678 | /* Get the component value (offset by OFFSET bytes) of a struct or |
| 3679 | union WHOLE. Component's type is TYPE. */ |
| 3680 | |
struct value *
value_from_component (struct value *whole, struct type *type, LONGEST offset)
{
  struct value *v;

  /* If WHOLE is lazy memory, keep the component lazy too, so we do
     not force a fetch of the whole object just to slice out one
     component.  */
  if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
    v = allocate_value_lazy (type);
  else
    {
      v = allocate_value (type);
      value_contents_copy (v, value_embedded_offset (v),
			   whole, value_embedded_offset (whole) + offset,
			   type_length_units (type));
    }
  /* The component's location is WHOLE's location plus OFFSET; fold in
     WHOLE's own offset and embedded offset as well.  */
  v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
  set_value_component_location (v, whole);

  return v;
}
| 3700 | |
| 3701 | struct value * |
| 3702 | coerce_ref_if_computed (const struct value *arg) |
| 3703 | { |
| 3704 | const struct lval_funcs *funcs; |
| 3705 | |
| 3706 | if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg)))) |
| 3707 | return NULL; |
| 3708 | |
| 3709 | if (value_lval_const (arg) != lval_computed) |
| 3710 | return NULL; |
| 3711 | |
| 3712 | funcs = value_computed_funcs (arg); |
| 3713 | if (funcs->coerce_ref == NULL) |
| 3714 | return NULL; |
| 3715 | |
| 3716 | return funcs->coerce_ref (arg); |
| 3717 | } |
| 3718 | |
| 3719 | /* Look at value.h for description. */ |
| 3720 | |
struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      struct value *original_value,
			      CORE_ADDR original_value_address)
{
  /* Callers must hand us a pointer or reference type.  */
  gdb_assert (original_type->code () == TYPE_CODE_PTR
	      || TYPE_IS_REFERENCE (original_type));

  /* Resolve dynamic properties of the pointed-to type at the
     pointed-to address; the empty view means no contents are
     available for the resolution.  */
  struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
  gdb::array_view<const gdb_byte> view;
  struct type *resolved_original_target_type
    = resolve_dynamic_type (original_target_type, view,
			    original_value_address);

  /* Re-adjust type.  */
  deprecated_set_value_type (value, resolved_original_target_type);

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
| 3746 | |
struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (value_type (arg));
  struct value *retval;
  struct type *enc_type;

  /* Computed lvalues get first crack at handling the coercion
     themselves.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  /* Non-reference values pass through untouched.  */
  if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
    return arg;

  /* Dereference using the enclosing type's target, so any embedding
     information on ARG is taken into account.  */
  enc_type = check_typedef (value_enclosing_type (arg));
  enc_type = TYPE_TARGET_TYPE (enc_type);

  CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg));
  retval = value_at_lazy (enc_type, addr);
  enc_type = value_type (retval);
  /* value_at_lazy may have produced a value with a resolved dynamic
     type; re-adjust the result's type and embedding accordingly.  */
  return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
				       arg, addr);
}
| 3770 | |
| 3771 | struct value * |
| 3772 | coerce_array (struct value *arg) |
| 3773 | { |
| 3774 | struct type *type; |
| 3775 | |
| 3776 | arg = coerce_ref (arg); |
| 3777 | type = check_typedef (value_type (arg)); |
| 3778 | |
| 3779 | switch (type->code ()) |
| 3780 | { |
| 3781 | case TYPE_CODE_ARRAY: |
| 3782 | if (!type->is_vector () && current_language->c_style_arrays_p ()) |
| 3783 | arg = value_coerce_array (arg); |
| 3784 | break; |
| 3785 | case TYPE_CODE_FUNC: |
| 3786 | arg = value_coerce_function (arg); |
| 3787 | break; |
| 3788 | } |
| 3789 | return arg; |
| 3790 | } |
| 3791 | \f |
| 3792 | |
| 3793 | /* Return the return value convention that will be used for the |
| 3794 | specified type. */ |
| 3795 | |
| 3796 | enum return_value_convention |
| 3797 | struct_return_convention (struct gdbarch *gdbarch, |
| 3798 | struct value *function, struct type *value_type) |
| 3799 | { |
| 3800 | enum type_code code = value_type->code (); |
| 3801 | |
| 3802 | if (code == TYPE_CODE_ERROR) |
| 3803 | error (_("Function return type unknown.")); |
| 3804 | |
| 3805 | /* Probe the architecture for the return-value convention. */ |
| 3806 | return gdbarch_return_value (gdbarch, function, value_type, |
| 3807 | NULL, NULL, NULL); |
| 3808 | } |
| 3809 | |
| 3810 | /* Return true if the function returning the specified type is using |
| 3811 | the convention of returning structures in memory (passing in the |
| 3812 | address as a hidden first parameter). */ |
| 3813 | |
| 3814 | int |
| 3815 | using_struct_return (struct gdbarch *gdbarch, |
| 3816 | struct value *function, struct type *value_type) |
| 3817 | { |
| 3818 | if (value_type->code () == TYPE_CODE_VOID) |
| 3819 | /* A void return value is never in memory. See also corresponding |
| 3820 | code in "print_return_value". */ |
| 3821 | return 0; |
| 3822 | |
| 3823 | return (struct_return_convention (gdbarch, function, value_type) |
| 3824 | != RETURN_VALUE_REGISTER_CONVENTION); |
| 3825 | } |
| 3826 | |
| 3827 | /* Set the initialized field in a value struct. */ |
| 3828 | |
void
set_value_initialized (struct value *val, int status)
{
  /* Non-zero STATUS marks VAL's contents as initialized.  */
  val->initialized = status;
}
| 3834 | |
| 3835 | /* Return the initialized field in a value struct. */ |
| 3836 | |
int
value_initialized (const struct value *val)
{
  /* Non-zero means VAL's contents have been marked initialized.  */
  return val->initialized;
}
| 3842 | |
| 3843 | /* Helper for value_fetch_lazy when the value is a bitfield. */ |
| 3844 | |
static void
value_fetch_lazy_bitfield (struct value *val)
{
  /* VAL must actually be a bitfield.  */
  gdb_assert (value_bitsize (val) != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = value_parent (val);

  if (value_lazy (parent))
    value_fetch_lazy (parent);

  /* Extract VAL's bits from the now-fetched parent contents.  */
  unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
			 value_contents_for_printing (parent),
			 value_offset (val), parent);
}
| 3864 | |
| 3865 | /* Helper for value_fetch_lazy when the value is in memory. */ |
| 3866 | |
| 3867 | static void |
| 3868 | value_fetch_lazy_memory (struct value *val) |
| 3869 | { |
| 3870 | gdb_assert (VALUE_LVAL (val) == lval_memory); |
| 3871 | |
| 3872 | CORE_ADDR addr = value_address (val); |
| 3873 | struct type *type = check_typedef (value_enclosing_type (val)); |
| 3874 | |
| 3875 | if (TYPE_LENGTH (type)) |
| 3876 | read_value_memory (val, 0, value_stack (val), |
| 3877 | addr, value_contents_all_raw (val), |
| 3878 | type_length_units (type)); |
| 3879 | } |
| 3880 | |
| 3881 | /* Helper for value_fetch_lazy when the value is in a register. */ |
| 3882 | |
static void
value_fetch_lazy_register (struct value *val)
{
  struct frame_info *next_frame;
  int regnum;
  struct type *type = check_typedef (value_type (val));
  struct value *new_val = val, *mark = value_mark ();

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (value_offset (val) == 0);

  /* Walk down the chain of lazy lval_register values, unwinding one
     frame per iteration, until we reach a value that is either not a
     register or not lazy.  */
  while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
    {
      struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

      next_frame = frame_find_by_id (next_frame_id);
      regnum = VALUE_REGNUM (new_val);

      gdb_assert (next_frame != NULL);

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	 Since a "->next" operation was performed when setting
	 this field, we do not need to perform a "next" operation
	 again when unwinding the register.  That's why
	 frame_unwind_register_value() is called here instead of
	 get_frame_register_value().  */
      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (VALUE_LVAL (new_val) == lval_register
	  && value_lazy (new_val)
	  && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
	internal_error (__FILE__, __LINE__,
			_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (value_lazy (new_val))
    value_fetch_lazy (new_val);

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_value_lazy (val, 0);
  value_contents_copy (val, value_embedded_offset (val),
		       new_val, value_embedded_offset (new_val),
		       type_length_units (type));

  /* With frame debugging enabled, log where the register value was
     found and what bytes it contains.  */
  if (frame_debug)
    {
      struct gdbarch *gdbarch;
      struct frame_info *frame;
      /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
	 so that the frame level will be shown correctly.  */
      frame = frame_find_by_id (VALUE_FRAME_ID (val));
      regnum = VALUE_REGNUM (val);
      gdbarch = get_frame_arch (frame);

      fprintf_unfiltered (gdb_stdlog,
			  "{ value_fetch_lazy "
			  "(frame=%d,regnum=%d(%s),...) ",
			  frame_relative_level (frame), regnum,
			  user_reg_map_regnum_to_name (gdbarch, regnum));

      fprintf_unfiltered (gdb_stdlog, "->");
      if (value_optimized_out (new_val))
	{
	  fprintf_unfiltered (gdb_stdlog, " ");
	  val_print_optimized_out (new_val, gdb_stdlog);
	}
      else
	{
	  int i;
	  const gdb_byte *buf = value_contents (new_val);

	  if (VALUE_LVAL (new_val) == lval_register)
	    fprintf_unfiltered (gdb_stdlog, " register=%d",
				VALUE_REGNUM (new_val));
	  else if (VALUE_LVAL (new_val) == lval_memory)
	    fprintf_unfiltered (gdb_stdlog, " address=%s",
				paddress (gdbarch,
					  value_address (new_val)));
	  else
	    fprintf_unfiltered (gdb_stdlog, " computed");

	  fprintf_unfiltered (gdb_stdlog, " bytes=");
	  fprintf_unfiltered (gdb_stdlog, "[");
	  for (i = 0; i < register_size (gdbarch, regnum); i++)
	    fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	  fprintf_unfiltered (gdb_stdlog, "]");
	}

      fprintf_unfiltered (gdb_stdlog, " }\n");
    }

  /* Dispose of the intermediate values.  This prevents
     watchpoints from trying to watch the saved frame pointer.  */
  value_free_to_mark (mark);
}
| 4000 | |
| 4001 | /* Load the actual content of a lazy value. Fetch the data from the |
| 4002 | user's process and clear the lazy flag to indicate that the data in |
| 4003 | the buffer is valid. |
| 4004 | |
| 4005 | If the value is zero-length, we avoid calling read_memory, which |
| 4006 | would abort. We mark the value as fetched anyway -- all 0 bytes of |
| 4007 | it. */ |
| 4008 | |
void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (val->optimized_out.empty ());
  gdb_assert (val->unavailable.empty ());
  /* Dispatch to the fetch routine matching the value's location.
     Bitfields must be checked first: a bitfield can live in memory or
     in a register, but needs its parent value fetched instead.  */
  if (value_bitsize (val))
    value_fetch_lazy_bitfield (val);
  else if (VALUE_LVAL (val) == lval_memory)
    value_fetch_lazy_memory (val);
  else if (VALUE_LVAL (val) == lval_register)
    value_fetch_lazy_register (val);
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));

  /* The contents are now valid; mark the value as fetched.  */
  set_value_lazy (val, 0);
}
| 4033 | |
| 4034 | /* Implementation of the convenience function $_isvoid. */ |
| 4035 | |
| 4036 | static struct value * |
| 4037 | isvoid_internal_fn (struct gdbarch *gdbarch, |
| 4038 | const struct language_defn *language, |
| 4039 | void *cookie, int argc, struct value **argv) |
| 4040 | { |
| 4041 | int ret; |
| 4042 | |
| 4043 | if (argc != 1) |
| 4044 | error (_("You must provide one argument for $_isvoid.")); |
| 4045 | |
| 4046 | ret = value_type (argv[0])->code () == TYPE_CODE_VOID; |
| 4047 | |
| 4048 | return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); |
| 4049 | } |
| 4050 | |
| 4051 | /* Implementation of the convenience function $_creal. Extracts the |
| 4052 | real part from a complex number. */ |
| 4053 | |
| 4054 | static struct value * |
| 4055 | creal_internal_fn (struct gdbarch *gdbarch, |
| 4056 | const struct language_defn *language, |
| 4057 | void *cookie, int argc, struct value **argv) |
| 4058 | { |
| 4059 | if (argc != 1) |
| 4060 | error (_("You must provide one argument for $_creal.")); |
| 4061 | |
| 4062 | value *cval = argv[0]; |
| 4063 | type *ctype = check_typedef (value_type (cval)); |
| 4064 | if (ctype->code () != TYPE_CODE_COMPLEX) |
| 4065 | error (_("expected a complex number")); |
| 4066 | return value_real_part (cval); |
| 4067 | } |
| 4068 | |
| 4069 | /* Implementation of the convenience function $_cimag. Extracts the |
| 4070 | imaginary part from a complex number. */ |
| 4071 | |
| 4072 | static struct value * |
| 4073 | cimag_internal_fn (struct gdbarch *gdbarch, |
| 4074 | const struct language_defn *language, |
| 4075 | void *cookie, int argc, |
| 4076 | struct value **argv) |
| 4077 | { |
| 4078 | if (argc != 1) |
| 4079 | error (_("You must provide one argument for $_cimag.")); |
| 4080 | |
| 4081 | value *cval = argv[0]; |
| 4082 | type *ctype = check_typedef (value_type (cval)); |
| 4083 | if (ctype->code () != TYPE_CODE_COMPLEX) |
| 4084 | error (_("expected a complex number")); |
| 4085 | return value_imaginary_part (cval); |
| 4086 | } |
| 4087 | |
| 4088 | #if GDB_SELF_TEST |
| 4089 | namespace selftests |
| 4090 | { |
| 4091 | |
| 4092 | /* Test the ranges_contain function. */ |
| 4093 | |
| 4094 | static void |
| 4095 | test_ranges_contain () |
| 4096 | { |
| 4097 | std::vector<range> ranges; |
| 4098 | range r; |
| 4099 | |
| 4100 | /* [10, 14] */ |
| 4101 | r.offset = 10; |
| 4102 | r.length = 5; |
| 4103 | ranges.push_back (r); |
| 4104 | |
| 4105 | /* [20, 24] */ |
| 4106 | r.offset = 20; |
| 4107 | r.length = 5; |
| 4108 | ranges.push_back (r); |
| 4109 | |
| 4110 | /* [2, 6] */ |
| 4111 | SELF_CHECK (!ranges_contain (ranges, 2, 5)); |
| 4112 | /* [9, 13] */ |
| 4113 | SELF_CHECK (ranges_contain (ranges, 9, 5)); |
| 4114 | /* [10, 11] */ |
| 4115 | SELF_CHECK (ranges_contain (ranges, 10, 2)); |
| 4116 | /* [10, 14] */ |
| 4117 | SELF_CHECK (ranges_contain (ranges, 10, 5)); |
| 4118 | /* [13, 18] */ |
| 4119 | SELF_CHECK (ranges_contain (ranges, 13, 6)); |
| 4120 | /* [14, 18] */ |
| 4121 | SELF_CHECK (ranges_contain (ranges, 14, 5)); |
| 4122 | /* [15, 18] */ |
| 4123 | SELF_CHECK (!ranges_contain (ranges, 15, 4)); |
| 4124 | /* [16, 19] */ |
| 4125 | SELF_CHECK (!ranges_contain (ranges, 16, 4)); |
| 4126 | /* [16, 21] */ |
| 4127 | SELF_CHECK (ranges_contain (ranges, 16, 6)); |
| 4128 | /* [21, 21] */ |
| 4129 | SELF_CHECK (ranges_contain (ranges, 21, 1)); |
| 4130 | /* [21, 25] */ |
| 4131 | SELF_CHECK (ranges_contain (ranges, 21, 5)); |
| 4132 | /* [26, 28] */ |
| 4133 | SELF_CHECK (!ranges_contain (ranges, 26, 3)); |
| 4134 | } |
| 4135 | |
| 4136 | /* Check that RANGES contains the same ranges as EXPECTED. */ |
| 4137 | |
static bool
check_ranges_vector (gdb::array_view<const range> ranges,
		     gdb::array_view<const range> expected)
{
  /* array_view operator== compares lengths and then the elements.  */
  return ranges == expected;
}
| 4144 | |
| 4145 | /* Test the insert_into_bit_range_vector function. */ |
| 4146 | |
static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* Insert into an empty vector: [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 10, 5);
    static const range expected[] = {
      {10, 5}
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert a sub-range of an existing range; no change: [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 11, 4);
    static const range expected = {10, 5};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert a disjoint range after: [10, 14] [20, 24] */
  {
    insert_into_bit_range_vector (&ranges, 20, 5);
    static const range expected[] = {
      {10, 5},
      {20, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert a range that merges with the second: [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 17, 5);
    static const range expected[] = {
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert a disjoint range before: [2, 8] [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 2, 7);
    static const range expected[] = {
      {2, 7},
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert the single missing bit, merging the first two ranges:
     [2, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Repeat the previous insertion; must be idempotent: [2, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* Insert a range spanning the remaining gap, collapsing everything
     into one range: [2, 33] */
  {
    insert_into_bit_range_vector (&ranges, 4, 30);
    static const range expected = {2, 32};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }
}
| 4226 | |
| 4227 | } /* namespace selftests */ |
| 4228 | #endif /* GDB_SELF_TEST */ |
| 4229 | |
| 4230 | void _initialize_values (); |
| 4231 | void |
| 4232 | _initialize_values () |
| 4233 | { |
| 4234 | add_cmd ("convenience", no_class, show_convenience, _("\ |
| 4235 | Debugger convenience (\"$foo\") variables and functions.\n\ |
| 4236 | Convenience variables are created when you assign them values;\n\ |
| 4237 | thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\ |
| 4238 | \n\ |
| 4239 | A few convenience variables are given values automatically:\n\ |
| 4240 | \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\ |
| 4241 | \"$__\" holds the contents of the last address examined with \"x\"." |
| 4242 | #ifdef HAVE_PYTHON |
| 4243 | "\n\n\ |
| 4244 | Convenience functions are defined via the Python API." |
| 4245 | #endif |
| 4246 | ), &showlist); |
| 4247 | add_alias_cmd ("conv", "convenience", no_class, 1, &showlist); |
| 4248 | |
| 4249 | add_cmd ("values", no_set_class, show_values, _("\ |
| 4250 | Elements of value history around item number IDX (or last ten)."), |
| 4251 | &showlist); |
| 4252 | |
| 4253 | add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\ |
| 4254 | Initialize a convenience variable if necessary.\n\ |
| 4255 | init-if-undefined VARIABLE = EXPRESSION\n\ |
| 4256 | Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\ |
| 4257 | exist or does not contain a value. The EXPRESSION is not evaluated if the\n\ |
| 4258 | VARIABLE is already initialized.")); |
| 4259 | |
| 4260 | add_prefix_cmd ("function", no_class, function_command, _("\ |
| 4261 | Placeholder command for showing help on convenience functions."), |
| 4262 | &functionlist, "function ", 0, &cmdlist); |
| 4263 | |
| 4264 | add_internal_function ("_isvoid", _("\ |
| 4265 | Check whether an expression is void.\n\ |
| 4266 | Usage: $_isvoid (expression)\n\ |
| 4267 | Return 1 if the expression is void, zero otherwise."), |
| 4268 | isvoid_internal_fn, NULL); |
| 4269 | |
| 4270 | add_internal_function ("_creal", _("\ |
| 4271 | Extract the real part of a complex number.\n\ |
| 4272 | Usage: $_creal (expression)\n\ |
| 4273 | Return the real part of a complex number, the type depends on the\n\ |
| 4274 | type of a complex number."), |
| 4275 | creal_internal_fn, NULL); |
| 4276 | |
| 4277 | add_internal_function ("_cimag", _("\ |
| 4278 | Extract the imaginary part of a complex number.\n\ |
| 4279 | Usage: $_cimag (expression)\n\ |
| 4280 | Return the imaginary part of a complex number, the type depends on the\n\ |
| 4281 | type of a complex number."), |
| 4282 | cimag_internal_fn, NULL); |
| 4283 | |
| 4284 | add_setshow_zuinteger_unlimited_cmd ("max-value-size", |
| 4285 | class_support, &max_value_size, _("\ |
| 4286 | Set maximum sized value gdb will load from the inferior."), _("\ |
| 4287 | Show maximum sized value gdb will load from the inferior."), _("\ |
| 4288 | Use this to control the maximum size, in bytes, of a value that gdb\n\ |
| 4289 | will load from the inferior. Setting this value to 'unlimited'\n\ |
| 4290 | disables checking.\n\ |
| 4291 | Setting this does not invalidate already allocated values, it only\n\ |
| 4292 | prevents future values, larger than this size, from being allocated."), |
| 4293 | set_max_value_size, |
| 4294 | show_max_value_size, |
| 4295 | &setlist, &showlist); |
| 4296 | #if GDB_SELF_TEST |
| 4297 | selftests::register_test ("ranges_contain", selftests::test_ranges_contain); |
| 4298 | selftests::register_test ("insert_into_bit_range_vector", |
| 4299 | selftests::test_insert_into_bit_range_vector); |
| 4300 | #endif |
| 4301 | } |
| 4302 | |
| 4303 | /* See value.h. */ |
| 4304 | |
void
finalize_values ()
{
  /* Release every value still on the global chain.  */
  all_values.clear ();
}