| 1 | /* Target-dependent code for the HP PA architecture, for GDB. |
| 2 | |
| 3 | Copyright 1986, 1987, 1989, 1990, 1991, 1992, 1993, 1994, 1995, |
| 4 | 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software |
| 5 | Foundation, Inc. |
| 6 | |
| 7 | Contributed by the Center for Software Science at the |
| 8 | University of Utah (pa-gdb-bugs@cs.utah.edu). |
| 9 | |
| 10 | This file is part of GDB. |
| 11 | |
| 12 | This program is free software; you can redistribute it and/or modify |
| 13 | it under the terms of the GNU General Public License as published by |
| 14 | the Free Software Foundation; either version 2 of the License, or |
| 15 | (at your option) any later version. |
| 16 | |
| 17 | This program is distributed in the hope that it will be useful, |
| 18 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 19 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 20 | GNU General Public License for more details. |
| 21 | |
| 22 | You should have received a copy of the GNU General Public License |
| 23 | along with this program; if not, write to the Free Software |
| 24 | Foundation, Inc., 59 Temple Place - Suite 330, |
| 25 | Boston, MA 02111-1307, USA. */ |
| 26 | |
| 27 | #include "defs.h" |
| 28 | #include "frame.h" |
| 29 | #include "bfd.h" |
| 30 | #include "inferior.h" |
| 31 | #include "value.h" |
| 32 | #include "regcache.h" |
| 33 | #include "completer.h" |
| 34 | #include "language.h" |
| 35 | #include "osabi.h" |
| 36 | #include "gdb_assert.h" |
| 37 | #include "infttrace.h" |
| 38 | #include "arch-utils.h" |
| 39 | /* For argument passing to the inferior */ |
| 40 | #include "symtab.h" |
| 41 | #include "infcall.h" |
| 42 | #include "dis-asm.h" |
| 43 | #include "trad-frame.h" |
| 44 | #include "frame-unwind.h" |
| 45 | #include "frame-base.h" |
| 46 | |
| 47 | #ifdef USG |
| 48 | #include <sys/types.h> |
| 49 | #endif |
| 50 | |
| 51 | #include <dl.h> |
| 52 | #include <sys/param.h> |
| 53 | #include <signal.h> |
| 54 | |
| 55 | #include <sys/ptrace.h> |
| 56 | #include <machine/save_state.h> |
| 57 | |
| 58 | #ifdef COFF_ENCAPSULATE |
| 59 | #include "a.out.encap.h" |
| 60 | #else |
| 61 | #endif |
| 62 | |
| 63 | /*#include <sys/user.h> After a.out.h */ |
| 64 | #include <sys/file.h> |
| 65 | #include "gdb_stat.h" |
| 66 | #include "gdb_wait.h" |
| 67 | |
| 68 | #include "gdbcore.h" |
| 69 | #include "gdbcmd.h" |
| 70 | #include "target.h" |
| 71 | #include "symfile.h" |
| 72 | #include "objfiles.h" |
| 73 | #include "hppa-tdep.h" |
| 74 | |
/* Some local constants.  */
static const int hppa32_num_regs = 128;
static const int hppa64_num_regs = 96;

/* Masks for getting at various relevant fields of an instruction
   word.  */
#define MASK_5 0x1f
#define MASK_11 0x7ff
#define MASK_14 0x3fff
#define MASK_21 0x1fffff

/* Define offsets into the call dummy for the _sr4export address.
   See comments related to CALL_DUMMY for more info.  */
#define SR4EXPORT_LDIL_OFFSET (INSTRUCTION_SIZE * 12)
#define SR4EXPORT_LDO_OFFSET (INSTRUCTION_SIZE * 13)

/* To support detection of the pseudo-initial frame
   that threads have.  */
#define THREAD_INITIAL_FRAME_SYMBOL "__pthread_exit"
#define THREAD_INITIAL_FRAME_SYM_LEN sizeof(THREAD_INITIAL_FRAME_SYMBOL)

/* Sizes (in bytes) of the native unwind entries: 16 bytes for a
   regular unwind entry, 8 bytes for a stub unwind entry.  */
#define UNWIND_ENTRY_SIZE 16
#define STUB_UNWIND_ENTRY_SIZE 8
| 98 | |
/* Instruction field extractors (defined below).  */
static int get_field (unsigned word, int from, int to);

static int extract_5_load (unsigned int);

static unsigned extract_5R_store (unsigned int);

static unsigned extract_5r_store (unsigned int);

/* Exported: look up the unwind table entry covering a given PC.  */
struct unwind_table_entry *find_unwind_entry (CORE_ADDR);

static int extract_17 (unsigned int);

static int extract_21 (unsigned);

static int extract_14 (unsigned);

/* Implementation of the "unwind" maintenance command.  */
static void unwind_command (char *, int);

static int low_sign_extend (unsigned int, unsigned int);

static int sign_extend (unsigned int, unsigned int);

static int hppa_alignof (struct type *);

/* Prologue-analysis helpers.  */
static int prologue_inst_adjust_sp (unsigned long);

static int is_branch (unsigned long);

static int inst_saves_gr (unsigned long);

static int inst_saves_fr (unsigned long);

/* Unwind table construction.  */
static int compare_unwind_entries (const void *, const void *);

static void read_unwind_info (struct objfile *);

static void internalize_unwinds (struct objfile *,
                                 struct unwind_table_entry *,
                                 asection *, unsigned int,
                                 unsigned int, CORE_ADDR);
static void record_text_segment_lowaddr (bfd *, asection *, void *);
/* FIXME: brobecker 2002-11-07: We will likely be able to make the
   following functions static, once hppa is partially multiarched.  */
int hppa_reg_struct_has_addr (int gcc_p, struct type *type);
CORE_ADDR hppa_skip_prologue (CORE_ADDR pc);
CORE_ADDR hppa_skip_trampoline_code (CORE_ADDR pc);
int hppa_in_solib_call_trampoline (CORE_ADDR pc, char *name);
int hppa_in_solib_return_trampoline (CORE_ADDR pc, char *name);
int hppa_inner_than (CORE_ADDR lhs, CORE_ADDR rhs);
int hppa_pc_requires_run_before_use (CORE_ADDR pc);
int hppa_instruction_nullified (void);
int hppa_cannot_store_register (int regnum);
CORE_ADDR hppa_smash_text_address (CORE_ADDR addr);
CORE_ADDR hppa_target_read_pc (ptid_t ptid);
void hppa_target_write_pc (CORE_ADDR v, ptid_t ptid);

/* Argument bundle for cover_find_stub_with_shl_get; presumably
   dispatched through a catch_errors-style callback -- confirm at the
   call site.  */
typedef struct
{
  struct minimal_symbol *msym;
  CORE_ADDR solib_handle;
  CORE_ADDR return_val;
}
args_for_find_stub;

static int cover_find_stub_with_shl_get (void *);

/* Nonzero when the target is a PA2.0 machine.  */
static int is_pa_2 = 0;	/* False */

/* This is declared in symtab.c; set to 1 in hp-symtab-read.c */
extern int hp_som_som_object_present;

/* In breakpoint.c */
extern int exception_catchpoints_are_fragile;
| 173 | /* Handle 32/64-bit struct return conventions. */ |
| 174 | |
| 175 | static enum return_value_convention |
| 176 | hppa32_return_value (struct gdbarch *gdbarch, |
| 177 | struct type *type, struct regcache *regcache, |
| 178 | void *readbuf, const void *writebuf) |
| 179 | { |
| 180 | if (TYPE_CODE (type) == TYPE_CODE_FLT) |
| 181 | { |
| 182 | if (readbuf != NULL) |
| 183 | regcache_cooked_read_part (regcache, FP4_REGNUM, 0, |
| 184 | TYPE_LENGTH (type), readbuf); |
| 185 | if (writebuf != NULL) |
| 186 | regcache_cooked_write_part (regcache, FP4_REGNUM, 0, |
| 187 | TYPE_LENGTH (type), writebuf); |
| 188 | return RETURN_VALUE_REGISTER_CONVENTION; |
| 189 | } |
| 190 | if (TYPE_LENGTH (type) <= 2 * 4) |
| 191 | { |
| 192 | /* The value always lives in the right hand end of the register |
| 193 | (or register pair)? */ |
| 194 | int b; |
| 195 | int reg = 28; |
| 196 | int part = TYPE_LENGTH (type) % 4; |
| 197 | /* The left hand register contains only part of the value, |
| 198 | transfer that first so that the rest can be xfered as entire |
| 199 | 4-byte registers. */ |
| 200 | if (part > 0) |
| 201 | { |
| 202 | if (readbuf != NULL) |
| 203 | regcache_cooked_read_part (regcache, reg, 4 - part, |
| 204 | part, readbuf); |
| 205 | if (writebuf != NULL) |
| 206 | regcache_cooked_write_part (regcache, reg, 4 - part, |
| 207 | part, writebuf); |
| 208 | reg++; |
| 209 | } |
| 210 | /* Now transfer the remaining register values. */ |
| 211 | for (b = part; b < TYPE_LENGTH (type); b += 4) |
| 212 | { |
| 213 | if (readbuf != NULL) |
| 214 | regcache_cooked_read (regcache, reg, (char *) readbuf + b); |
| 215 | if (writebuf != NULL) |
| 216 | regcache_cooked_write (regcache, reg, (const char *) writebuf + b); |
| 217 | reg++; |
| 218 | } |
| 219 | return RETURN_VALUE_REGISTER_CONVENTION; |
| 220 | } |
| 221 | else |
| 222 | return RETURN_VALUE_STRUCT_CONVENTION; |
| 223 | } |
| 224 | |
static enum return_value_convention
hppa64_return_value (struct gdbarch *gdbarch,
		     struct type *type, struct regcache *regcache,
		     void *readbuf, const void *writebuf)
{
  /* RM: Floats are returned in FR4R, doubles in FR4.  Integral values
     are in r28, padded on the left.  Aggregates less than 65 bits are
     in r28, right padded.  Aggregates up to 128 bits are in r28 and
     r29, right padded.  */
  if (TYPE_CODE (type) == TYPE_CODE_FLT
      && TYPE_LENGTH (type) <= 8)
    {
      /* Floats are right aligned? */
      int offset = register_size (gdbarch, FP4_REGNUM) - TYPE_LENGTH (type);
      if (readbuf != NULL)
	regcache_cooked_read_part (regcache, FP4_REGNUM, offset,
				   TYPE_LENGTH (type), readbuf);
      if (writebuf != NULL)
	regcache_cooked_write_part (regcache, FP4_REGNUM, offset,
				    TYPE_LENGTH (type), writebuf);
      return RETURN_VALUE_REGISTER_CONVENTION;
    }
  else if (TYPE_LENGTH (type) <= 8 && is_integral_type (type))
    {
      /* Integrals are right aligned.
         NOTE(review): the offset is computed from FP4_REGNUM's size
         even though the value lives in r28; presumably all registers
         are 8 bytes on hppa64, making this equivalent -- confirm.  */
      int offset = register_size (gdbarch, FP4_REGNUM) - TYPE_LENGTH (type);
      if (readbuf != NULL)
	regcache_cooked_read_part (regcache, 28, offset,
				   TYPE_LENGTH (type), readbuf);
      if (writebuf != NULL)
	regcache_cooked_write_part (regcache, 28, offset,
				    TYPE_LENGTH (type), writebuf);
      return RETURN_VALUE_REGISTER_CONVENTION;
    }
  else if (TYPE_LENGTH (type) <= 2 * 8)
    {
      /* Composite values are left aligned, split across r28/r29 in
         8-byte chunks; the final chunk may be partial.  */
      int b;
      for (b = 0; b < TYPE_LENGTH (type); b += 8)
	{
	  int part = min (8, TYPE_LENGTH (type) - b);
	  if (readbuf != NULL)
	    regcache_cooked_read_part (regcache, 28 + b / 8, 0, part,
				       (char *) readbuf + b);
	  if (writebuf != NULL)
	    regcache_cooked_write_part (regcache, 28 + b / 8, 0, part,
					(const char *) writebuf + b);
	}
      return RETURN_VALUE_REGISTER_CONVENTION;
    }
  else
    return RETURN_VALUE_STRUCT_CONVENTION;
}
| 278 | |
| 279 | /* Routines to extract various sized constants out of hppa |
| 280 | instructions. */ |
| 281 | |
/* Sign extend the low BITS bits of VAL.  This assumes that no
   garbage lies outside of the lower bits of value.

   The old implementation computed "(-1 << bits)"; left-shifting a
   negative value is undefined behavior in C, so build the sign mask
   with unsigned arithmetic instead.  The result is identical for all
   valid inputs (BITS in 1..31).  */

static int
sign_extend (unsigned val, unsigned bits)
{
  if (val >> (bits - 1))
    return (int) (val | (~0U << bits));
  return (int) val;
}
| 290 | |
/* For many immediate values the sign bit is the LOW bit!  VAL is a
   BITS-wide field whose least-significant bit is the sign and whose
   upper BITS-1 bits are the magnitude.

   The old implementation computed "(-1 << (bits - 1))"; left-shifting
   a negative value is undefined behavior in C, so build the sign mask
   with unsigned arithmetic instead.  Identical results for all valid
   inputs.  */

static int
low_sign_extend (unsigned val, unsigned bits)
{
  unsigned magnitude = val >> 1;

  if (val & 0x1)
    return (int) (magnitude | (~0U << (bits - 1)));
  return (int) magnitude;
}
| 298 | |
/* Extract the bit field of WORD spanning positions FROM through TO
   inclusive, using HP's numbering (bit 0 is the MSB).  */

static int
get_field (unsigned word, int from, int to)
{
  int width = to - from + 1;
  unsigned mask = (1u << width) - 1;

  return (int) ((word >> (31 - to)) & mask);
}
| 307 | |
| 308 | /* extract the immediate field from a ld{bhw}s instruction */ |
| 309 | |
| 310 | static int |
| 311 | extract_5_load (unsigned word) |
| 312 | { |
| 313 | return low_sign_extend (word >> 16 & MASK_5, 5); |
| 314 | } |
| 315 | |
| 316 | /* extract the immediate field from a break instruction */ |
| 317 | |
| 318 | static unsigned |
| 319 | extract_5r_store (unsigned word) |
| 320 | { |
| 321 | return (word & MASK_5); |
| 322 | } |
| 323 | |
| 324 | /* extract the immediate field from a {sr}sm instruction */ |
| 325 | |
| 326 | static unsigned |
| 327 | extract_5R_store (unsigned word) |
| 328 | { |
| 329 | return (word >> 16 & MASK_5); |
| 330 | } |
| 331 | |
| 332 | /* extract a 14 bit immediate field */ |
| 333 | |
| 334 | static int |
| 335 | extract_14 (unsigned word) |
| 336 | { |
| 337 | return low_sign_extend (word & MASK_14, 14); |
| 338 | } |
| 339 | |
/* Extract a 21 bit constant (used for long-immediate instructions).
   The field is stored scrambled within the instruction word; this
   reassembles it and returns the value shifted into position (low 11
   bits zero), sign extended.  */

static int
extract_21 (unsigned word)
{
  int val;

  word &= MASK_21;
  word <<= 11;
  /* Reassemble the constant from its scattered sub-fields, most
     significant piece first (HP numbering: bit 0 is the MSB).  */
  val = get_field (word, 20, 20);
  val <<= 11;
  val |= get_field (word, 9, 19);
  val <<= 2;
  val |= get_field (word, 5, 6);
  val <<= 5;
  val |= get_field (word, 0, 4);
  val <<= 2;
  val |= get_field (word, 7, 8);
  /* The 21-bit value occupies the high end of a 32-bit word.  */
  return sign_extend (val, 21) << 11;
}
| 360 | |
/* Extract a 17 bit constant from branch instructions, returning the
   19 bit signed value.  (The 17-bit field is a word offset, hence
   the final shift left by 2.)  */

static int
extract_17 (unsigned word)
{
  /* The displacement is scattered across several sub-fields of the
     instruction word; the instruction's low bit is the sign bit.  */
  return sign_extend (get_field (word, 19, 28) |
		      get_field (word, 29, 29) << 10 |
		      get_field (word, 11, 15) << 11 |
		      (word & 0x1) << 16, 17) << 2;
}
| 372 | \f |
| 373 | |
| 374 | /* Compare the start address for two unwind entries returning 1 if |
| 375 | the first address is larger than the second, -1 if the second is |
| 376 | larger than the first, and zero if they are equal. */ |
| 377 | |
| 378 | static int |
| 379 | compare_unwind_entries (const void *arg1, const void *arg2) |
| 380 | { |
| 381 | const struct unwind_table_entry *a = arg1; |
| 382 | const struct unwind_table_entry *b = arg2; |
| 383 | |
| 384 | if (a->region_start > b->region_start) |
| 385 | return 1; |
| 386 | else if (a->region_start < b->region_start) |
| 387 | return -1; |
| 388 | else |
| 389 | return 0; |
| 390 | } |
| 391 | |
/* Lowest VMA of any allocated, loaded, read-only section seen by
   record_text_segment_lowaddr; set up by internalize_unwinds.  */
static CORE_ADDR low_text_segment_address;
| 393 | |
| 394 | static void |
| 395 | record_text_segment_lowaddr (bfd *abfd, asection *section, void *ignored) |
| 396 | { |
| 397 | if (((section->flags & (SEC_ALLOC | SEC_LOAD | SEC_READONLY)) |
| 398 | == (SEC_ALLOC | SEC_LOAD | SEC_READONLY)) |
| 399 | && section->vma < low_text_segment_address) |
| 400 | low_text_segment_address = section->vma; |
| 401 | } |
| 402 | |
/* Read ENTRIES unwind entries (SIZE bytes of raw data) from SECTION
   of OBJFILE into TABLE, converting from target byte order and
   relocating the region addresses by TEXT_OFFSET.  */

static void
internalize_unwinds (struct objfile *objfile, struct unwind_table_entry *table,
		     asection *section, unsigned int entries, unsigned int size,
		     CORE_ADDR text_offset)
{
  /* We will read the unwind entries into temporary memory, then
     fill in the actual unwind table.  */
  if (size > 0)
    {
      unsigned long tmp;
      unsigned i;
      char *buf = alloca (size);

      low_text_segment_address = -1;

      /* If addresses are 64 bits wide, then unwinds are supposed to
	 be segment relative offsets instead of absolute addresses.

	 Note that when loading a shared library (text_offset != 0) the
	 unwinds are already relative to the text_offset that will be
	 passed in.  */
      if (TARGET_PTR_BIT == 64 && text_offset == 0)
	{
	  bfd_map_over_sections (objfile->obfd,
				 record_text_segment_lowaddr, NULL);

	  /* ?!? Mask off some low bits.  Should this instead subtract
	     out the lowest section's filepos or something like that?
	     This looks very hokey to me.  */
	  low_text_segment_address &= ~0xfff;
	  text_offset += low_text_segment_address;
	}

      bfd_get_section_contents (objfile->obfd, section, buf, 0, size);

      /* Now internalize the information being careful to handle host/target
	 endian issues.  */
      for (i = 0; i < entries; i++)
	{
	  /* Each 16-byte entry holds the region start and end
	     addresses followed by two 32-bit words of packed flags.  */
	  table[i].region_start = bfd_get_32 (objfile->obfd,
					      (bfd_byte *) buf);
	  table[i].region_start += text_offset;
	  buf += 4;
	  table[i].region_end = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
	  table[i].region_end += text_offset;
	  buf += 4;
	  tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
	  buf += 4;
	  /* Unpack the third word's bit fields, most significant bit
	     first.  */
	  table[i].Cannot_unwind = (tmp >> 31) & 0x1;
	  table[i].Millicode = (tmp >> 30) & 0x1;
	  table[i].Millicode_save_sr0 = (tmp >> 29) & 0x1;
	  table[i].Region_description = (tmp >> 27) & 0x3;
	  table[i].reserved1 = (tmp >> 26) & 0x1;
	  table[i].Entry_SR = (tmp >> 25) & 0x1;
	  table[i].Entry_FR = (tmp >> 21) & 0xf;
	  table[i].Entry_GR = (tmp >> 16) & 0x1f;
	  table[i].Args_stored = (tmp >> 15) & 0x1;
	  table[i].Variable_Frame = (tmp >> 14) & 0x1;
	  table[i].Separate_Package_Body = (tmp >> 13) & 0x1;
	  table[i].Frame_Extension_Millicode = (tmp >> 12) & 0x1;
	  table[i].Stack_Overflow_Check = (tmp >> 11) & 0x1;
	  table[i].Two_Instruction_SP_Increment = (tmp >> 10) & 0x1;
	  table[i].Ada_Region = (tmp >> 9) & 0x1;
	  table[i].cxx_info = (tmp >> 8) & 0x1;
	  table[i].cxx_try_catch = (tmp >> 7) & 0x1;
	  table[i].sched_entry_seq = (tmp >> 6) & 0x1;
	  table[i].reserved2 = (tmp >> 5) & 0x1;
	  table[i].Save_SP = (tmp >> 4) & 0x1;
	  table[i].Save_RP = (tmp >> 3) & 0x1;
	  table[i].Save_MRP_in_frame = (tmp >> 2) & 0x1;
	  table[i].extn_ptr_defined = (tmp >> 1) & 0x1;
	  table[i].Cleanup_defined = tmp & 0x1;
	  tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
	  buf += 4;
	  /* Unpack the fourth word; its low 27 bits are the total
	     frame size.  */
	  table[i].MPE_XL_interrupt_marker = (tmp >> 31) & 0x1;
	  table[i].HP_UX_interrupt_marker = (tmp >> 30) & 0x1;
	  table[i].Large_frame = (tmp >> 29) & 0x1;
	  table[i].Pseudo_SP_Set = (tmp >> 28) & 0x1;
	  table[i].reserved4 = (tmp >> 27) & 0x1;
	  table[i].Total_frame_size = tmp & 0x7ffffff;

	  /* Stub unwinds are handled elsewhere.  */
	  table[i].stub_unwind.stub_type = 0;
	  table[i].stub_unwind.padding = 0;
	}
    }
}
| 490 | |
| 491 | /* Read in the backtrace information stored in the `$UNWIND_START$' section of |
| 492 | the object file. This info is used mainly by find_unwind_entry() to find |
| 493 | out the stack frame size and frame pointer used by procedures. We put |
| 494 | everything on the psymbol obstack in the objfile so that it automatically |
| 495 | gets freed when the objfile is destroyed. */ |
| 496 | |
static void
read_unwind_info (struct objfile *objfile)
{
  asection *unwind_sec, *stub_unwind_sec;
  unsigned unwind_size, stub_unwind_size, total_size;
  unsigned index, unwind_entries;
  unsigned stub_entries, total_entries;
  CORE_ADDR text_offset;
  struct obj_unwind_info *ui;
  obj_private_data_t *obj_private;

  text_offset = ANOFFSET (objfile->section_offsets, 0);
  ui = (struct obj_unwind_info *) obstack_alloc (&objfile->objfile_obstack,
						 sizeof (struct obj_unwind_info));

  ui->table = NULL;
  ui->cache = NULL;
  ui->last = -1;

  /* For reasons unknown the HP PA64 tools generate multiple unwinder
     sections in a single executable.  So we just iterate over every
     section in the BFD looking for unwinder sections instead of trying
     to do a lookup with bfd_get_section_by_name.

     First determine the total size of the unwind tables so that we
     can allocate memory in a nice big hunk.  */
  total_entries = 0;
  for (unwind_sec = objfile->obfd->sections;
       unwind_sec;
       unwind_sec = unwind_sec->next)
    {
      if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
	  || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
	{
	  unwind_size = bfd_section_size (objfile->obfd, unwind_sec);
	  unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;

	  total_entries += unwind_entries;
	}
    }

  /* Now compute the size of the stub unwinds.  Note the ELF tools do not
     use stub unwinds at the current time.  */
  stub_unwind_sec = bfd_get_section_by_name (objfile->obfd, "$UNWIND_END$");

  if (stub_unwind_sec)
    {
      stub_unwind_size = bfd_section_size (objfile->obfd, stub_unwind_sec);
      stub_entries = stub_unwind_size / STUB_UNWIND_ENTRY_SIZE;
    }
  else
    {
      stub_unwind_size = 0;
      stub_entries = 0;
    }

  /* Compute total number of unwind entries and their total size.  */
  total_entries += stub_entries;
  total_size = total_entries * sizeof (struct unwind_table_entry);

  /* Allocate memory for the unwind table.  */
  ui->table = (struct unwind_table_entry *)
    obstack_alloc (&objfile->objfile_obstack, total_size);
  ui->last = total_entries - 1;

  /* Now read in each unwind section and internalize the standard unwind
     entries.  */
  index = 0;
  for (unwind_sec = objfile->obfd->sections;
       unwind_sec;
       unwind_sec = unwind_sec->next)
    {
      if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
	  || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
	{
	  unwind_size = bfd_section_size (objfile->obfd, unwind_sec);
	  unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;

	  internalize_unwinds (objfile, &ui->table[index], unwind_sec,
			       unwind_entries, unwind_size, text_offset);
	  index += unwind_entries;
	}
    }

  /* Now read in and internalize the stub unwind entries.  */
  if (stub_unwind_size > 0)
    {
      unsigned int i;
      char *buf = alloca (stub_unwind_size);

      /* Read in the stub unwind entries.  */
      bfd_get_section_contents (objfile->obfd, stub_unwind_sec, buf,
				0, stub_unwind_size);

      /* Now convert them into regular unwind entries.  */
      for (i = 0; i < stub_entries; i++, index++)
	{
	  /* Clear out the next unwind entry.  */
	  memset (&ui->table[index], 0, sizeof (struct unwind_table_entry));

	  /* Convert offset & size into region_start and region_end.
	     Stuff away the stub type into "reserved" fields.  Each
	     8-byte stub entry is: 4-byte start address, 1-byte stub
	     type (plus a pad byte), then a 2-byte length in words.  */
	  ui->table[index].region_start = bfd_get_32 (objfile->obfd,
						      (bfd_byte *) buf);
	  ui->table[index].region_start += text_offset;
	  buf += 4;
	  ui->table[index].stub_unwind.stub_type = bfd_get_8 (objfile->obfd,
							      (bfd_byte *) buf);
	  buf += 2;
	  ui->table[index].region_end
	    = ui->table[index].region_start + 4 *
	    (bfd_get_16 (objfile->obfd, (bfd_byte *) buf) - 1);
	  buf += 2;
	}

    }

  /* Unwind table needs to be kept sorted.  */
  qsort (ui->table, total_entries, sizeof (struct unwind_table_entry),
	 compare_unwind_entries);

  /* Keep a pointer to the unwind information, creating the per-objfile
     private data block if this is the first use.  */
  if (objfile->obj_private == NULL)
    {
      obj_private = (obj_private_data_t *)
	obstack_alloc (&objfile->objfile_obstack,
		       sizeof (obj_private_data_t));
      obj_private->unwind_info = NULL;
      obj_private->so_info = NULL;
      obj_private->dp = 0;

      objfile->obj_private = obj_private;
    }
  obj_private = (obj_private_data_t *) objfile->obj_private;
  obj_private->unwind_info = ui;
}
| 633 | |
| 634 | /* Lookup the unwind (stack backtrace) info for the given PC. We search all |
| 635 | of the objfiles seeking the unwind table entry for this PC. Each objfile |
| 636 | contains a sorted list of struct unwind_table_entry. Since we do a binary |
| 637 | search of the unwind tables, we depend upon them to be sorted. */ |
| 638 | |
struct unwind_table_entry *
find_unwind_entry (CORE_ADDR pc)
{
  int first, middle, last;
  struct objfile *objfile;

  /* A function at address 0?  Not in HP-UX! */
  if (pc == (CORE_ADDR) 0)
    return NULL;

  ALL_OBJFILES (objfile)
  {
    struct obj_unwind_info *ui;
    ui = NULL;
    if (objfile->obj_private)
      ui = ((obj_private_data_t *) (objfile->obj_private))->unwind_info;

    /* Unwind info is read lazily, the first time it is needed for a
       given objfile.  */
    if (!ui)
      {
	read_unwind_info (objfile);
	if (objfile->obj_private == NULL)
	  error ("Internal error reading unwind information.");
	ui = ((obj_private_data_t *) (objfile->obj_private))->unwind_info;
      }

    /* First, check the one-entry cache of the last hit.  */

    if (ui->cache
	&& pc >= ui->cache->region_start
	&& pc <= ui->cache->region_end)
      return ui->cache;

    /* Not in the cache, do a binary search.  The table is kept sorted
       by region_start (see read_unwind_info).  */

    first = 0;
    last = ui->last;

    while (first <= last)
      {
	middle = (first + last) / 2;
	if (pc >= ui->table[middle].region_start
	    && pc <= ui->table[middle].region_end)
	  {
	    /* Remember the hit for next time.  */
	    ui->cache = &ui->table[middle];
	    return &ui->table[middle];
	  }

	if (pc < ui->table[middle].region_start)
	  last = middle - 1;
	else
	  first = middle + 1;
      }
  }				/* ALL_OBJFILES() */
  return NULL;
}
| 694 | |
| 695 | const unsigned char * |
| 696 | hppa_breakpoint_from_pc (CORE_ADDR *pc, int *len) |
| 697 | { |
| 698 | static const unsigned char breakpoint[] = {0x00, 0x01, 0x00, 0x04}; |
| 699 | (*len) = sizeof (breakpoint); |
| 700 | return breakpoint; |
| 701 | } |
| 702 | |
/* Return the name of 32-bit register I, or NULL if I is out of
   range.  Registers 0-31 are the general registers (several shown
   under their ABI roles: rp, dp, sp, ret0/ret1), 32-63 control and
   space registers, 64-71 the FP status/exception registers, and
   72-127 the left/right single-precision halves of fr4-fr31.  */

const char *
hppa32_register_name (int i)
{
  static char *names[] = {
    "flags",  "r1",      "rp",     "r3",
    "r4",     "r5",      "r6",     "r7",
    "r8",     "r9",      "r10",    "r11",
    "r12",    "r13",     "r14",    "r15",
    "r16",    "r17",     "r18",    "r19",
    "r20",    "r21",     "r22",    "r23",
    "r24",    "r25",     "r26",    "dp",
    "ret0",   "ret1",    "sp",     "r31",
    "sar",    "pcoqh",   "pcsqh",  "pcoqt",
    "pcsqt",  "eiem",    "iir",    "isr",
    "ior",    "ipsw",    "goto",   "sr4",
    "sr0",    "sr1",     "sr2",    "sr3",
    "sr5",    "sr6",     "sr7",    "cr0",
    "cr8",    "cr9",     "ccr",    "cr12",
    "cr13",   "cr24",    "cr25",   "cr26",
    "mpsfu_high", "mpsfu_low", "mpsfu_ovflo", "pad",
    "fpsr",   "fpe1",    "fpe2",   "fpe3",
    "fpe4",   "fpe5",    "fpe6",   "fpe7",
    "fr4",    "fr4R",    "fr5",    "fr5R",
    "fr6",    "fr6R",    "fr7",    "fr7R",
    "fr8",    "fr8R",    "fr9",    "fr9R",
    "fr10",   "fr10R",   "fr11",   "fr11R",
    "fr12",   "fr12R",   "fr13",   "fr13R",
    "fr14",   "fr14R",   "fr15",   "fr15R",
    "fr16",   "fr16R",   "fr17",   "fr17R",
    "fr18",   "fr18R",   "fr19",   "fr19R",
    "fr20",   "fr20R",   "fr21",   "fr21R",
    "fr22",   "fr22R",   "fr23",   "fr23R",
    "fr24",   "fr24R",   "fr25",   "fr25R",
    "fr26",   "fr26R",   "fr27",   "fr27R",
    "fr28",   "fr28R",   "fr29",   "fr29R",
    "fr30",   "fr30R",   "fr31",   "fr31R"
  };
  const int num_names = sizeof (names) / sizeof (names[0]);

  return (i >= 0 && i < num_names) ? names[i] : NULL;
}
| 747 | |
/* Return the name of 64-bit register I, or NULL if I is out of
   range.  Same layout as the 32-bit table except the FP registers
   are full-width (no L/R halves), giving 96 registers total.  */

const char *
hppa64_register_name (int i)
{
  static char *names[] = {
    "flags",  "r1",      "rp",     "r3",
    "r4",     "r5",      "r6",     "r7",
    "r8",     "r9",      "r10",    "r11",
    "r12",    "r13",     "r14",    "r15",
    "r16",    "r17",     "r18",    "r19",
    "r20",    "r21",     "r22",    "r23",
    "r24",    "r25",     "r26",    "dp",
    "ret0",   "ret1",    "sp",     "r31",
    "sar",    "pcoqh",   "pcsqh",  "pcoqt",
    "pcsqt",  "eiem",    "iir",    "isr",
    "ior",    "ipsw",    "goto",   "sr4",
    "sr0",    "sr1",     "sr2",    "sr3",
    "sr5",    "sr6",     "sr7",    "cr0",
    "cr8",    "cr9",     "ccr",    "cr12",
    "cr13",   "cr24",    "cr25",   "cr26",
    "mpsfu_high", "mpsfu_low", "mpsfu_ovflo", "pad",
    "fpsr",   "fpe1",    "fpe2",   "fpe3",
    "fr4",    "fr5",     "fr6",    "fr7",
    "fr8",    "fr9",     "fr10",   "fr11",
    "fr12",   "fr13",    "fr14",   "fr15",
    "fr16",   "fr17",    "fr18",   "fr19",
    "fr20",   "fr21",    "fr22",   "fr23",
    "fr24",   "fr25",    "fr26",   "fr27",
    "fr28",   "fr29",    "fr30",   "fr31"
  };
  const int num_names = sizeof (names) / sizeof (names[0]);

  return (i >= 0 && i < num_names) ? names[i] : NULL;
}
| 782 | |
| 783 | |
| 784 | |
| 785 | /* Return the adjustment necessary to make for addresses on the stack |
| 786 | as presented by hpread.c. |
| 787 | |
| 788 | This is necessary because of the stack direction on the PA and the |
| 789 | bizarre way in which someone (?) decided they wanted to handle |
| 790 | frame pointerless code in GDB. */ |
| 791 | int |
| 792 | hpread_adjust_stack_address (CORE_ADDR func_addr) |
| 793 | { |
| 794 | struct unwind_table_entry *u; |
| 795 | |
| 796 | u = find_unwind_entry (func_addr); |
| 797 | if (!u) |
| 798 | return 0; |
| 799 | else |
| 800 | return u->Total_frame_size << 3; |
| 801 | } |
| 802 | |
| 803 | /* This function pushes a stack frame with arguments as part of the |
| 804 | inferior function calling mechanism. |
| 805 | |
| 806 | This is the version of the function for the 32-bit PA machines, in |
| 807 | which later arguments appear at lower addresses. (The stack always |
| 808 | grows towards higher addresses.) |
| 809 | |
| 810 | We simply allocate the appropriate amount of stack space and put |
| 811 | arguments into their proper slots. */ |
| 812 | |
| 813 | CORE_ADDR |
| 814 | hppa32_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr, |
| 815 | struct regcache *regcache, CORE_ADDR bp_addr, |
| 816 | int nargs, struct value **args, CORE_ADDR sp, |
| 817 | int struct_return, CORE_ADDR struct_addr) |
| 818 | { |
| 819 | /* NOTE: cagney/2004-02-27: This is a guess - its implemented by |
| 820 | reverse engineering testsuite failures. */ |
| 821 | |
| 822 | /* Stack base address at which any pass-by-reference parameters are |
| 823 | stored. */ |
| 824 | CORE_ADDR struct_end = 0; |
| 825 | /* Stack base address at which the first parameter is stored. */ |
| 826 | CORE_ADDR param_end = 0; |
| 827 | |
| 828 | /* The inner most end of the stack after all the parameters have |
| 829 | been pushed. */ |
| 830 | CORE_ADDR new_sp = 0; |
| 831 | |
| 832 | /* Two passes. First pass computes the location of everything, |
| 833 | second pass writes the bytes out. */ |
| 834 | int write_pass; |
| 835 | for (write_pass = 0; write_pass < 2; write_pass++) |
| 836 | { |
| 837 | CORE_ADDR struct_ptr = 0; |
| 838 | CORE_ADDR param_ptr = 0; |
| 839 | int reg = 27; /* NOTE: Registers go down. */ |
| 840 | int i; |
| 841 | for (i = 0; i < nargs; i++) |
| 842 | { |
| 843 | struct value *arg = args[i]; |
| 844 | struct type *type = check_typedef (VALUE_TYPE (arg)); |
| 845 | /* The corresponding parameter that is pushed onto the |
| 846 | stack, and [possibly] passed in a register. */ |
| 847 | char param_val[8]; |
| 848 | int param_len; |
| 849 | memset (param_val, 0, sizeof param_val); |
| 850 | if (TYPE_LENGTH (type) > 8) |
| 851 | { |
| 852 | /* Large parameter, pass by reference. Store the value |
| 853 | in "struct" area and then pass its address. */ |
| 854 | param_len = 4; |
| 855 | struct_ptr += align_up (TYPE_LENGTH (type), 8); |
| 856 | if (write_pass) |
| 857 | write_memory (struct_end - struct_ptr, VALUE_CONTENTS (arg), |
| 858 | TYPE_LENGTH (type)); |
| 859 | store_unsigned_integer (param_val, 4, struct_end - struct_ptr); |
| 860 | } |
| 861 | else if (TYPE_CODE (type) == TYPE_CODE_INT |
| 862 | || TYPE_CODE (type) == TYPE_CODE_ENUM) |
| 863 | { |
| 864 | /* Integer value store, right aligned. "unpack_long" |
| 865 | takes care of any sign-extension problems. */ |
| 866 | param_len = align_up (TYPE_LENGTH (type), 4); |
| 867 | store_unsigned_integer (param_val, param_len, |
| 868 | unpack_long (type, |
| 869 | VALUE_CONTENTS (arg))); |
| 870 | } |
| 871 | else |
| 872 | { |
| 873 | /* Small struct value, store right aligned? */ |
| 874 | param_len = align_up (TYPE_LENGTH (type), 4); |
| 875 | memcpy (param_val + param_len - TYPE_LENGTH (type), |
| 876 | VALUE_CONTENTS (arg), TYPE_LENGTH (type)); |
| 877 | } |
| 878 | param_ptr += param_len; |
| 879 | reg -= param_len / 4; |
| 880 | if (write_pass) |
| 881 | { |
| 882 | write_memory (param_end - param_ptr, param_val, param_len); |
| 883 | if (reg >= 23) |
| 884 | { |
| 885 | regcache_cooked_write (regcache, reg, param_val); |
| 886 | if (param_len > 4) |
| 887 | regcache_cooked_write (regcache, reg + 1, param_val + 4); |
| 888 | } |
| 889 | } |
| 890 | } |
| 891 | |
| 892 | /* Update the various stack pointers. */ |
| 893 | if (!write_pass) |
| 894 | { |
| 895 | struct_end = sp + struct_ptr; |
| 896 | /* PARAM_PTR already accounts for all the arguments passed |
| 897 | by the user. However, the ABI mandates minimum stack |
| 898 | space allocations for outgoing arguments. The ABI also |
| 899 | mandates minimum stack alignments which we must |
| 900 | preserve. */ |
| 901 | param_end = struct_end + max (align_up (param_ptr, 8), |
| 902 | REG_PARM_STACK_SPACE); |
| 903 | } |
| 904 | } |
| 905 | |
| 906 | /* If a structure has to be returned, set up register 28 to hold its |
| 907 | address */ |
| 908 | if (struct_return) |
| 909 | write_register (28, struct_addr); |
| 910 | |
| 911 | /* Set the return address. */ |
| 912 | regcache_cooked_write_unsigned (regcache, RP_REGNUM, bp_addr); |
| 913 | |
| 914 | /* The stack will have 32 bytes of additional space for a frame marker. */ |
| 915 | return param_end + 32; |
| 916 | } |
| 917 | |
| 918 | /* This function pushes a stack frame with arguments as part of the |
| 919 | inferior function calling mechanism. |
| 920 | |
| 921 | This is the version for the PA64, in which later arguments appear |
| 922 | at higher addresses. (The stack always grows towards higher |
| 923 | addresses.) |
| 924 | |
| 925 | We simply allocate the appropriate amount of stack space and put |
| 926 | arguments into their proper slots. |
| 927 | |
| 928 | This ABI also requires that the caller provide an argument pointer |
| 929 | to the callee, so we do that too. */ |
| 930 | |
| 931 | CORE_ADDR |
| 932 | hppa64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr, |
| 933 | struct regcache *regcache, CORE_ADDR bp_addr, |
| 934 | int nargs, struct value **args, CORE_ADDR sp, |
| 935 | int struct_return, CORE_ADDR struct_addr) |
| 936 | { |
| 937 | /* NOTE: cagney/2004-02-27: This is a guess - its implemented by |
| 938 | reverse engineering testsuite failures. */ |
| 939 | |
| 940 | /* Stack base address at which any pass-by-reference parameters are |
| 941 | stored. */ |
| 942 | CORE_ADDR struct_end = 0; |
| 943 | /* Stack base address at which the first parameter is stored. */ |
| 944 | CORE_ADDR param_end = 0; |
| 945 | |
| 946 | /* The inner most end of the stack after all the parameters have |
| 947 | been pushed. */ |
| 948 | CORE_ADDR new_sp = 0; |
| 949 | |
| 950 | /* Two passes. First pass computes the location of everything, |
| 951 | second pass writes the bytes out. */ |
| 952 | int write_pass; |
| 953 | for (write_pass = 0; write_pass < 2; write_pass++) |
| 954 | { |
| 955 | CORE_ADDR struct_ptr = 0; |
| 956 | CORE_ADDR param_ptr = 0; |
| 957 | int i; |
| 958 | for (i = 0; i < nargs; i++) |
| 959 | { |
| 960 | struct value *arg = args[i]; |
| 961 | struct type *type = check_typedef (VALUE_TYPE (arg)); |
| 962 | if ((TYPE_CODE (type) == TYPE_CODE_INT |
| 963 | || TYPE_CODE (type) == TYPE_CODE_ENUM) |
| 964 | && TYPE_LENGTH (type) <= 8) |
| 965 | { |
| 966 | /* Integer value store, right aligned. "unpack_long" |
| 967 | takes care of any sign-extension problems. */ |
| 968 | param_ptr += 8; |
| 969 | if (write_pass) |
| 970 | { |
| 971 | ULONGEST val = unpack_long (type, VALUE_CONTENTS (arg)); |
| 972 | int reg = 27 - param_ptr / 8; |
| 973 | write_memory_unsigned_integer (param_end - param_ptr, |
| 974 | val, 8); |
| 975 | if (reg >= 19) |
| 976 | regcache_cooked_write_unsigned (regcache, reg, val); |
| 977 | } |
| 978 | } |
| 979 | else |
| 980 | { |
| 981 | /* Small struct value, store left aligned? */ |
| 982 | int reg; |
| 983 | if (TYPE_LENGTH (type) > 8) |
| 984 | { |
| 985 | param_ptr = align_up (param_ptr, 16); |
| 986 | reg = 26 - param_ptr / 8; |
| 987 | param_ptr += align_up (TYPE_LENGTH (type), 16); |
| 988 | } |
| 989 | else |
| 990 | { |
| 991 | param_ptr = align_up (param_ptr, 8); |
| 992 | reg = 26 - param_ptr / 8; |
| 993 | param_ptr += align_up (TYPE_LENGTH (type), 8); |
| 994 | } |
| 995 | if (write_pass) |
| 996 | { |
| 997 | int byte; |
| 998 | write_memory (param_end - param_ptr, VALUE_CONTENTS (arg), |
| 999 | TYPE_LENGTH (type)); |
| 1000 | for (byte = 0; byte < TYPE_LENGTH (type); byte += 8) |
| 1001 | { |
| 1002 | if (reg >= 19) |
| 1003 | { |
| 1004 | int len = min (8, TYPE_LENGTH (type) - byte); |
| 1005 | regcache_cooked_write_part (regcache, reg, 0, len, |
| 1006 | VALUE_CONTENTS (arg) + byte); |
| 1007 | } |
| 1008 | reg--; |
| 1009 | } |
| 1010 | } |
| 1011 | } |
| 1012 | } |
| 1013 | /* Update the various stack pointers. */ |
| 1014 | if (!write_pass) |
| 1015 | { |
| 1016 | struct_end = sp + struct_ptr; |
| 1017 | /* PARAM_PTR already accounts for all the arguments passed |
| 1018 | by the user. However, the ABI mandates minimum stack |
| 1019 | space allocations for outgoing arguments. The ABI also |
| 1020 | mandates minimum stack alignments which we must |
| 1021 | preserve. */ |
| 1022 | param_end = struct_end + max (align_up (param_ptr, 16), |
| 1023 | REG_PARM_STACK_SPACE); |
| 1024 | } |
| 1025 | } |
| 1026 | |
| 1027 | /* If a structure has to be returned, set up register 28 to hold its |
| 1028 | address */ |
| 1029 | if (struct_return) |
| 1030 | write_register (28, struct_addr); |
| 1031 | |
| 1032 | /* Set the return address. */ |
| 1033 | regcache_cooked_write_unsigned (regcache, RP_REGNUM, bp_addr); |
| 1034 | |
| 1035 | /* The stack will have 32 bytes of additional space for a frame marker. */ |
| 1036 | return param_end + 64; |
| 1037 | } |
| 1038 | |
| 1039 | static CORE_ADDR |
| 1040 | hppa32_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr) |
| 1041 | { |
| 1042 | /* HP frames are 64-byte (or cache line) aligned (yes that's _byte_ |
| 1043 | and not _bit_)! */ |
| 1044 | return align_up (addr, 64); |
| 1045 | } |
| 1046 | |
| 1047 | /* Force all frames to 16-byte alignment. Better safe than sorry. */ |
| 1048 | |
| 1049 | static CORE_ADDR |
| 1050 | hppa64_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr) |
| 1051 | { |
| 1052 | /* Just always 16-byte align. */ |
| 1053 | return align_up (addr, 16); |
| 1054 | } |
| 1055 | |
| 1056 | |
| 1057 | /* elz: Used to lookup a symbol in the shared libraries. |
| 1058 | This function calls shl_findsym, indirectly through a |
| 1059 | call to __d_shl_get. __d_shl_get is in end.c, which is always |
| 1060 | linked in by the hp compilers/linkers. |
| 1061 | The call to shl_findsym cannot be made directly because it needs |
| 1062 | to be active in target address space. |
| 1063 | inputs: - minimal symbol pointer for the function we want to look up |
| 1064 | - address in target space of the descriptor for the library |
| 1065 | where we want to look the symbol up. |
| 1066 | This address is retrieved using the |
| 1067 | som_solib_get_solib_by_pc function (somsolib.c). |
| 1068 | output: - real address in the library of the function. |
| 1069 | note: the handle can be null, in which case shl_findsym will look for |
| 1070 | the symbol in all the loaded shared libraries. |
| 1071 | files to look at if you need reference on this stuff: |
| 1072 | dld.c, dld_shl_findsym.c |
| 1073 | end.c |
| 1074 | man entry for shl_findsym */ |
| 1075 | |
CORE_ADDR
find_stub_with_shl_get (struct minimal_symbol *function, CORE_ADDR handle)
{
  struct symbol *get_sym, *symbol2;
  struct minimal_symbol *buff_minsym, *msymbol;
  struct type *ftype;
  struct value **args;
  struct value *funcval;
  struct value *val;

  /* TMP (-1) is the initial "not yet filled in" marker written into the
     result and errno slots before the call.  */
  int x, namelen, err_value, tmp = -1;
  CORE_ADDR endo_buff_addr, value_return_addr, errno_return_addr;
  CORE_ADDR stub_addr;


  args = alloca (sizeof (struct value *) * 8);	/* 6 for the arguments and one null one??? */
  /* __d_shl_get lives in end.c, which HP's compilers/linkers always
     link in; it calls shl_findsym for us inside the target.  */
  funcval = find_function_in_inferior ("__d_shl_get");
  get_sym = lookup_symbol ("__d_shl_get", NULL, VAR_DOMAIN, NULL, NULL);
  /* __buffer (also from end.o) is scratch space in the inferior used to
     pass the symbol name and receive the results.  */
  buff_minsym = lookup_minimal_symbol ("__buffer", NULL, NULL);
  msymbol = lookup_minimal_symbol ("__shldp", NULL, NULL);
  symbol2 = lookup_symbol ("__shldp", NULL, VAR_DOMAIN, NULL, NULL);
  endo_buff_addr = SYMBOL_VALUE_ADDRESS (buff_minsym);
  namelen = strlen (DEPRECATED_SYMBOL_NAME (function));
  /* The result slot follows the symbol name in the buffer.  */
  value_return_addr = endo_buff_addr + namelen;
  ftype = check_typedef (SYMBOL_TYPE (get_sym));

  /* do alignment */
  if ((x = value_return_addr % 64) != 0)
    value_return_addr = value_return_addr + 64 - x;

  errno_return_addr = value_return_addr + 64;


  /* set up stuff needed by __d_shl_get in buffer in end.o */

  target_write_memory (endo_buff_addr, DEPRECATED_SYMBOL_NAME (function), namelen);

  /* NOTE(review): the host representation of TMP/HANDLE is written raw
     into target memory — this assumes host and target agree on byte
     order and that 4 bytes is the right width; confirm for cross
     debugging.  */
  target_write_memory (value_return_addr, (char *) &tmp, 4);

  target_write_memory (errno_return_addr, (char *) &tmp, 4);

  target_write_memory (SYMBOL_VALUE_ADDRESS (msymbol),
		       (char *) &handle, 4);

  /* now prepare the arguments for the call */

  /* Argument types are taken from __d_shl_get's own prototype fields.
     12 is the shl_findsym "index" argument; TYPE_PROCEDURE selects a
     code-symbol lookup.  */
  args[0] = value_from_longest (TYPE_FIELD_TYPE (ftype, 0), 12);
  args[1] = value_from_pointer (TYPE_FIELD_TYPE (ftype, 1), SYMBOL_VALUE_ADDRESS (msymbol));
  args[2] = value_from_pointer (TYPE_FIELD_TYPE (ftype, 2), endo_buff_addr);
  args[3] = value_from_longest (TYPE_FIELD_TYPE (ftype, 3), TYPE_PROCEDURE);
  args[4] = value_from_pointer (TYPE_FIELD_TYPE (ftype, 4), value_return_addr);
  args[5] = value_from_pointer (TYPE_FIELD_TYPE (ftype, 5), errno_return_addr);

  /* now call the function */

  val = call_function_by_hand (funcval, 6, args);

  /* now get the results */

  target_read_memory (errno_return_addr, (char *) &err_value, sizeof (err_value));

  /* NOTE(review): only 4 bytes were reserved/initialized above, but
     sizeof (stub_addr) bytes are read back — on a 64-bit CORE_ADDR
     this reads past the initialized slot; verify.  */
  target_read_memory (value_return_addr, (char *) &stub_addr, sizeof (stub_addr));
  /* NOTE(review): CORE_ADDR is presumably unsigned, so "<= 0" is
     effectively "== 0" — confirm that is the intended failure test.  */
  if (stub_addr <= 0)
    error ("call to __d_shl_get failed, error code is %d", err_value);

  return (stub_addr);
}
| 1143 | |
| 1144 | /* Cover routine for find_stub_with_shl_get to pass to catch_errors */ |
| 1145 | static int |
| 1146 | cover_find_stub_with_shl_get (void *args_untyped) |
| 1147 | { |
| 1148 | args_for_find_stub *args = args_untyped; |
| 1149 | args->return_val = find_stub_with_shl_get (args->msym, args->solib_handle); |
| 1150 | return 0; |
| 1151 | } |
| 1152 | |
| 1153 | /* Get the PC from %r31 if currently in a syscall. Also mask out privilege |
| 1154 | bits. */ |
| 1155 | |
| 1156 | CORE_ADDR |
| 1157 | hppa_target_read_pc (ptid_t ptid) |
| 1158 | { |
| 1159 | int flags = read_register_pid (FLAGS_REGNUM, ptid); |
| 1160 | |
| 1161 | /* The following test does not belong here. It is OS-specific, and belongs |
| 1162 | in native code. */ |
| 1163 | /* Test SS_INSYSCALL */ |
| 1164 | if (flags & 2) |
| 1165 | return read_register_pid (31, ptid) & ~0x3; |
| 1166 | |
| 1167 | return read_register_pid (PCOQ_HEAD_REGNUM, ptid) & ~0x3; |
| 1168 | } |
| 1169 | |
| 1170 | /* Write out the PC. If currently in a syscall, then also write the new |
| 1171 | PC value into %r31. */ |
| 1172 | |
| 1173 | void |
| 1174 | hppa_target_write_pc (CORE_ADDR v, ptid_t ptid) |
| 1175 | { |
| 1176 | int flags = read_register_pid (FLAGS_REGNUM, ptid); |
| 1177 | |
| 1178 | /* The following test does not belong here. It is OS-specific, and belongs |
| 1179 | in native code. */ |
| 1180 | /* If in a syscall, then set %r31. Also make sure to get the |
| 1181 | privilege bits set correctly. */ |
| 1182 | /* Test SS_INSYSCALL */ |
| 1183 | if (flags & 2) |
| 1184 | write_register_pid (31, v | 0x3, ptid); |
| 1185 | |
| 1186 | write_register_pid (PCOQ_HEAD_REGNUM, v, ptid); |
| 1187 | write_register_pid (PCOQ_TAIL_REGNUM, v + 4, ptid); |
| 1188 | } |
| 1189 | |
| 1190 | /* return the alignment of a type in bytes. Structures have the maximum |
| 1191 | alignment required by their fields. */ |
| 1192 | |
| 1193 | static int |
| 1194 | hppa_alignof (struct type *type) |
| 1195 | { |
| 1196 | int max_align, align, i; |
| 1197 | CHECK_TYPEDEF (type); |
| 1198 | switch (TYPE_CODE (type)) |
| 1199 | { |
| 1200 | case TYPE_CODE_PTR: |
| 1201 | case TYPE_CODE_INT: |
| 1202 | case TYPE_CODE_FLT: |
| 1203 | return TYPE_LENGTH (type); |
| 1204 | case TYPE_CODE_ARRAY: |
| 1205 | return hppa_alignof (TYPE_FIELD_TYPE (type, 0)); |
| 1206 | case TYPE_CODE_STRUCT: |
| 1207 | case TYPE_CODE_UNION: |
| 1208 | max_align = 1; |
| 1209 | for (i = 0; i < TYPE_NFIELDS (type); i++) |
| 1210 | { |
| 1211 | /* Bit fields have no real alignment. */ |
| 1212 | /* if (!TYPE_FIELD_BITPOS (type, i)) */ |
| 1213 | if (!TYPE_FIELD_BITSIZE (type, i)) /* elz: this should be bitsize */ |
| 1214 | { |
| 1215 | align = hppa_alignof (TYPE_FIELD_TYPE (type, i)); |
| 1216 | max_align = max (max_align, align); |
| 1217 | } |
| 1218 | } |
| 1219 | return max_align; |
| 1220 | default: |
| 1221 | return 4; |
| 1222 | } |
| 1223 | } |
| 1224 | |
| 1225 | /* Return one if PC is in the call path of a trampoline, else return zero. |
| 1226 | |
| 1227 | Note we return one for *any* call trampoline (long-call, arg-reloc), not |
| 1228 | just shared library trampolines (import, export). */ |
| 1229 | |
int
hppa_in_solib_call_trampoline (CORE_ADDR pc, char *name)
{
  struct minimal_symbol *minsym;
  struct unwind_table_entry *u;
  /* Lazily-resolved addresses of the two C-library trampolines.  */
  static CORE_ADDR dyncall = 0;
  static CORE_ADDR sr4export = 0;

#ifdef GDB_TARGET_IS_HPPA_20W
  /* PA64 has a completely different stub/trampoline scheme.  Is it
     better?  Maybe.  It's certainly harder to determine with any
     certainty that we are in a stub because we can not refer to the
     unwinders to help.

     The heuristic is simple.  Try to lookup the current PC value in the
     minimal symbol table.  If that fails, then assume we are not in a
     stub and return.

     Then see if the PC value falls within the section bounds for the
     section containing the minimal symbol we found in the first
     step.  If it does, then assume we are not in a stub and return.

     Finally peek at the instructions to see if they look like a stub.  */
  {
    struct minimal_symbol *minsym;
    asection *sec;
    CORE_ADDR addr;
    int insn, i;

    minsym = lookup_minimal_symbol_by_pc (pc);
    if (! minsym)
      return 0;

    sec = SYMBOL_BFD_SECTION (minsym);

    /* PC inside the symbol's own section means normal code, not a
       stub.  */
    if (bfd_get_section_vma (sec->owner, sec) <= pc
	&& pc < (bfd_get_section_vma (sec->owner, sec)
		 + bfd_section_size (sec->owner, sec)))
      return 0;

    /* We might be in a stub.  Peek at the instructions.  Stubs are 3
       instructions long.  */
    insn = read_memory_integer (pc, 4);

    /* Find out where we think we are within the stub: the three
       instruction patterns below are the stub's first, second and
       third instructions respectively.  */
    if ((insn & 0xffffc00e) == 0x53610000)
      addr = pc;
    else if ((insn & 0xffffffff) == 0xe820d000)
      addr = pc - 4;
    else if ((insn & 0xffffc00e) == 0x537b0000)
      addr = pc - 8;
    else
      return 0;

    /* Verify the first insn of the presumed stub.  */
    insn = read_memory_integer (addr, 4);
    if ((insn & 0xffffc00e) != 0x53610000)
      return 0;

    /* Verify the second insn of the presumed stub.  */
    insn = read_memory_integer (addr + 4, 4);
    if ((insn & 0xffffffff) != 0xe820d000)
      return 0;

    /* Verify the third insn of the presumed stub.  */
    insn = read_memory_integer (addr + 8, 4);
    if ((insn & 0xffffc00e) != 0x537b0000)
      return 0;

    /* Looks like a stub.  */
    return 1;
  }
#endif

  /* FIXME XXX - dyncall and sr4export must be initialized whenever we get a
     new exec file */

  /* First see if PC is in one of the two C-library trampolines.
     -1 is cached when the symbol does not exist, so the lookup is
     attempted only once.  */
  if (!dyncall)
    {
      minsym = lookup_minimal_symbol ("$$dyncall", NULL, NULL);
      if (minsym)
	dyncall = SYMBOL_VALUE_ADDRESS (minsym);
      else
	dyncall = -1;
    }

  if (!sr4export)
    {
      minsym = lookup_minimal_symbol ("_sr4export", NULL, NULL);
      if (minsym)
	sr4export = SYMBOL_VALUE_ADDRESS (minsym);
      else
	sr4export = -1;
    }

  if (pc == dyncall || pc == sr4export)
    return 1;

  /* Linker-generated stubs are named ".stub".  */
  minsym = lookup_minimal_symbol_by_pc (pc);
  if (minsym && strcmp (DEPRECATED_SYMBOL_NAME (minsym), ".stub") == 0)
    return 1;

  /* Get the unwind descriptor corresponding to PC, return zero
     if no unwind was found.  */
  u = find_unwind_entry (pc);
  if (!u)
    return 0;

  /* If this isn't a linker stub, then return now.  */
  if (u->stub_unwind.stub_type == 0)
    return 0;

  /* By definition a long-branch stub is a call stub.  */
  if (u->stub_unwind.stub_type == LONG_BRANCH)
    return 1;

  /* The call and return path execute the same instructions within
     an IMPORT stub!  So an IMPORT stub is both a call and return
     trampoline.  */
  if (u->stub_unwind.stub_type == IMPORT)
    return 1;

  /* Parameter relocation stubs always have a call path and may have a
     return path.  */
  if (u->stub_unwind.stub_type == PARAMETER_RELOCATION
      || u->stub_unwind.stub_type == EXPORT)
    {
      CORE_ADDR addr;

      /* Search forward from the current PC until we hit a branch
         or the end of the stub.  */
      for (addr = pc; addr <= u->region_end; addr += 4)
	{
	  unsigned long insn;

	  insn = read_memory_integer (addr, 4);

	  /* Does it look like a bl?  If so then it's the call path, if
	     we find a bv or be first, then we're on the return path.  */
	  if ((insn & 0xfc00e000) == 0xe8000000)
	    return 1;
	  else if ((insn & 0xfc00e001) == 0xe800c000
		   || (insn & 0xfc000000) == 0xe0000000)
	    return 0;
	}

      /* Should never happen.  */
      warning ("Unable to find branch in parameter relocation stub.\n");
      return 0;
    }

  /* Unknown stub type.  For now, just return zero.  */
  return 0;
}
| 1385 | |
| 1386 | /* Return one if PC is in the return path of a trampoline, else return zero. |
| 1387 | |
| 1388 | Note we return one for *any* call trampoline (long-call, arg-reloc), not |
| 1389 | just shared library trampolines (import, export). */ |
| 1390 | |
| 1391 | int |
| 1392 | hppa_in_solib_return_trampoline (CORE_ADDR pc, char *name) |
| 1393 | { |
| 1394 | struct unwind_table_entry *u; |
| 1395 | |
| 1396 | /* Get the unwind descriptor corresponding to PC, return zero |
| 1397 | if no unwind was found. */ |
| 1398 | u = find_unwind_entry (pc); |
| 1399 | if (!u) |
| 1400 | return 0; |
| 1401 | |
| 1402 | /* If this isn't a linker stub or it's just a long branch stub, then |
| 1403 | return zero. */ |
| 1404 | if (u->stub_unwind.stub_type == 0 || u->stub_unwind.stub_type == LONG_BRANCH) |
| 1405 | return 0; |
| 1406 | |
| 1407 | /* The call and return path execute the same instructions within |
| 1408 | an IMPORT stub! So an IMPORT stub is both a call and return |
| 1409 | trampoline. */ |
| 1410 | if (u->stub_unwind.stub_type == IMPORT) |
| 1411 | return 1; |
| 1412 | |
| 1413 | /* Parameter relocation stubs always have a call path and may have a |
| 1414 | return path. */ |
| 1415 | if (u->stub_unwind.stub_type == PARAMETER_RELOCATION |
| 1416 | || u->stub_unwind.stub_type == EXPORT) |
| 1417 | { |
| 1418 | CORE_ADDR addr; |
| 1419 | |
| 1420 | /* Search forward from the current PC until we hit a branch |
| 1421 | or the end of the stub. */ |
| 1422 | for (addr = pc; addr <= u->region_end; addr += 4) |
| 1423 | { |
| 1424 | unsigned long insn; |
| 1425 | |
| 1426 | insn = read_memory_integer (addr, 4); |
| 1427 | |
| 1428 | /* Does it look like a bl? If so then it's the call path, if |
| 1429 | we find a bv or be first, then we're on the return path. */ |
| 1430 | if ((insn & 0xfc00e000) == 0xe8000000) |
| 1431 | return 0; |
| 1432 | else if ((insn & 0xfc00e001) == 0xe800c000 |
| 1433 | || (insn & 0xfc000000) == 0xe0000000) |
| 1434 | return 1; |
| 1435 | } |
| 1436 | |
| 1437 | /* Should never happen. */ |
| 1438 | warning ("Unable to find branch in parameter relocation stub.\n"); |
| 1439 | return 0; |
| 1440 | } |
| 1441 | |
| 1442 | /* Unknown stub type. For now, just return zero. */ |
| 1443 | return 0; |
| 1444 | |
| 1445 | } |
| 1446 | |
| 1447 | /* Figure out if PC is in a trampoline, and if so find out where |
| 1448 | the trampoline will jump to. If not in a trampoline, return zero. |
| 1449 | |
| 1450 | Simple code examination probably is not a good idea since the code |
| 1451 | sequences in trampolines can also appear in user code. |
| 1452 | |
| 1453 | We use unwinds and information from the minimal symbol table to |
| 1454 | determine when we're in a trampoline. This won't work for ELF |
| 1455 | (yet) since it doesn't create stub unwind entries. Whether or |
| 1456 | not ELF will create stub unwinds or normal unwinds for linker |
| 1457 | stubs is still being debated. |
| 1458 | |
| 1459 | This should handle simple calls through dyncall or sr4export, |
| 1460 | long calls, argument relocation stubs, and dyncall/sr4export |
| 1461 | calling an argument relocation stub. It even handles some stubs |
| 1462 | used in dynamic executables. */ |
| 1463 | |
| 1464 | CORE_ADDR |
| 1465 | hppa_skip_trampoline_code (CORE_ADDR pc) |
| 1466 | { |
| 1467 | long orig_pc = pc; |
| 1468 | long prev_inst, curr_inst, loc; |
| 1469 | static CORE_ADDR dyncall = 0; |
| 1470 | static CORE_ADDR dyncall_external = 0; |
| 1471 | static CORE_ADDR sr4export = 0; |
| 1472 | struct minimal_symbol *msym; |
| 1473 | struct unwind_table_entry *u; |
| 1474 | |
| 1475 | /* FIXME XXX - dyncall and sr4export must be initialized whenever we get a |
| 1476 | new exec file */ |
| 1477 | |
| 1478 | if (!dyncall) |
| 1479 | { |
| 1480 | msym = lookup_minimal_symbol ("$$dyncall", NULL, NULL); |
| 1481 | if (msym) |
| 1482 | dyncall = SYMBOL_VALUE_ADDRESS (msym); |
| 1483 | else |
| 1484 | dyncall = -1; |
| 1485 | } |
| 1486 | |
| 1487 | if (!dyncall_external) |
| 1488 | { |
| 1489 | msym = lookup_minimal_symbol ("$$dyncall_external", NULL, NULL); |
| 1490 | if (msym) |
| 1491 | dyncall_external = SYMBOL_VALUE_ADDRESS (msym); |
| 1492 | else |
| 1493 | dyncall_external = -1; |
| 1494 | } |
| 1495 | |
| 1496 | if (!sr4export) |
| 1497 | { |
| 1498 | msym = lookup_minimal_symbol ("_sr4export", NULL, NULL); |
| 1499 | if (msym) |
| 1500 | sr4export = SYMBOL_VALUE_ADDRESS (msym); |
| 1501 | else |
| 1502 | sr4export = -1; |
| 1503 | } |
| 1504 | |
| 1505 | /* Addresses passed to dyncall may *NOT* be the actual address |
| 1506 | of the function. So we may have to do something special. */ |
| 1507 | if (pc == dyncall) |
| 1508 | { |
| 1509 | pc = (CORE_ADDR) read_register (22); |
| 1510 | |
| 1511 | /* If bit 30 (counting from the left) is on, then pc is the address of |
| 1512 | the PLT entry for this function, not the address of the function |
| 1513 | itself. Bit 31 has meaning too, but only for MPE. */ |
| 1514 | if (pc & 0x2) |
| 1515 | pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, TARGET_PTR_BIT / 8); |
| 1516 | } |
| 1517 | if (pc == dyncall_external) |
| 1518 | { |
| 1519 | pc = (CORE_ADDR) read_register (22); |
| 1520 | pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, TARGET_PTR_BIT / 8); |
| 1521 | } |
| 1522 | else if (pc == sr4export) |
| 1523 | pc = (CORE_ADDR) (read_register (22)); |
| 1524 | |
| 1525 | /* Get the unwind descriptor corresponding to PC, return zero |
| 1526 | if no unwind was found. */ |
| 1527 | u = find_unwind_entry (pc); |
| 1528 | if (!u) |
| 1529 | return 0; |
| 1530 | |
| 1531 | /* If this isn't a linker stub, then return now. */ |
| 1532 | /* elz: attention here! (FIXME) because of a compiler/linker |
| 1533 | error, some stubs which should have a non zero stub_unwind.stub_type |
| 1534 | have unfortunately a value of zero. So this function would return here |
| 1535 | as if we were not in a trampoline. To fix this, we go look at the partial |
| 1536 | symbol information, which reports this guy as a stub. |
| 1537 | (FIXME): Unfortunately, we are not that lucky: it turns out that the |
| 1538 | partial symbol information is also wrong sometimes. This is because |
| 1539 | when it is entered (somread.c::som_symtab_read()) it can happen that |
| 1540 | if the type of the symbol (from the som) is Entry, and the symbol is |
| 1541 | in a shared library, then it can also be a trampoline. This would |
| 1542 | be OK, except that I believe the way they decide if we are ina shared library |
| 1543 | does not work. SOOOO..., even if we have a regular function w/o trampolines |
| 1544 | its minimal symbol can be assigned type mst_solib_trampoline. |
| 1545 | Also, if we find that the symbol is a real stub, then we fix the unwind |
| 1546 | descriptor, and define the stub type to be EXPORT. |
| 1547 | Hopefully this is correct most of the times. */ |
| 1548 | if (u->stub_unwind.stub_type == 0) |
| 1549 | { |
| 1550 | |
| 1551 | /* elz: NOTE (FIXME!) once the problem with the unwind information is fixed |
| 1552 | we can delete all the code which appears between the lines */ |
| 1553 | /*--------------------------------------------------------------------------*/ |
| 1554 | msym = lookup_minimal_symbol_by_pc (pc); |
| 1555 | |
| 1556 | if (msym == NULL || MSYMBOL_TYPE (msym) != mst_solib_trampoline) |
| 1557 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1558 | |
| 1559 | else if (msym != NULL && MSYMBOL_TYPE (msym) == mst_solib_trampoline) |
| 1560 | { |
| 1561 | struct objfile *objfile; |
| 1562 | struct minimal_symbol *msymbol; |
| 1563 | int function_found = 0; |
| 1564 | |
| 1565 | /* go look if there is another minimal symbol with the same name as |
| 1566 | this one, but with type mst_text. This would happen if the msym |
| 1567 | is an actual trampoline, in which case there would be another |
| 1568 | symbol with the same name corresponding to the real function */ |
| 1569 | |
| 1570 | ALL_MSYMBOLS (objfile, msymbol) |
| 1571 | { |
| 1572 | if (MSYMBOL_TYPE (msymbol) == mst_text |
| 1573 | && DEPRECATED_STREQ (DEPRECATED_SYMBOL_NAME (msymbol), DEPRECATED_SYMBOL_NAME (msym))) |
| 1574 | { |
| 1575 | function_found = 1; |
| 1576 | break; |
| 1577 | } |
| 1578 | } |
| 1579 | |
| 1580 | if (function_found) |
| 1581 | /* the type of msym is correct (mst_solib_trampoline), but |
| 1582 | the unwind info is wrong, so set it to the correct value */ |
| 1583 | u->stub_unwind.stub_type = EXPORT; |
| 1584 | else |
| 1585 | /* the stub type info in the unwind is correct (this is not a |
| 1586 | trampoline), but the msym type information is wrong, it |
| 1587 | should be mst_text. So we need to fix the msym, and also |
| 1588 | get out of this function */ |
| 1589 | { |
| 1590 | MSYMBOL_TYPE (msym) = mst_text; |
| 1591 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1592 | } |
| 1593 | } |
| 1594 | |
| 1595 | /*--------------------------------------------------------------------------*/ |
| 1596 | } |
| 1597 | |
| 1598 | /* It's a stub. Search for a branch and figure out where it goes. |
| 1599 | Note we have to handle multi insn branch sequences like ldil;ble. |
| 1600 | Most (all?) other branches can be determined by examining the contents |
| 1601 | of certain registers and the stack. */ |
| 1602 | |
| 1603 | loc = pc; |
| 1604 | curr_inst = 0; |
| 1605 | prev_inst = 0; |
| 1606 | while (1) |
| 1607 | { |
| 1608 | /* Make sure we haven't walked outside the range of this stub. */ |
| 1609 | if (u != find_unwind_entry (loc)) |
| 1610 | { |
| 1611 | warning ("Unable to find branch in linker stub"); |
| 1612 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1613 | } |
| 1614 | |
| 1615 | prev_inst = curr_inst; |
| 1616 | curr_inst = read_memory_integer (loc, 4); |
| 1617 | |
| 1618 | /* Does it look like a branch external using %r1? Then it's the |
| 1619 | branch from the stub to the actual function. */ |
| 1620 | if ((curr_inst & 0xffe0e000) == 0xe0202000) |
| 1621 | { |
| 1622 | /* Yup. See if the previous instruction loaded |
| 1623 | a value into %r1. If so compute and return the jump address. */ |
| 1624 | if ((prev_inst & 0xffe00000) == 0x20200000) |
| 1625 | return (extract_21 (prev_inst) + extract_17 (curr_inst)) & ~0x3; |
| 1626 | else |
| 1627 | { |
| 1628 | warning ("Unable to find ldil X,%%r1 before ble Y(%%sr4,%%r1)."); |
| 1629 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1630 | } |
| 1631 | } |
| 1632 | |
| 1633 | /* Does it look like a be 0(sr0,%r21)? OR |
| 1634 | Does it look like a be, n 0(sr0,%r21)? OR |
| 1635 | Does it look like a bve (r21)? (this is on PA2.0) |
| 1636 | Does it look like a bve, n(r21)? (this is also on PA2.0) |
| 1637 | That's the branch from an |
| 1638 | import stub to an export stub. |
| 1639 | |
| 1640 | It is impossible to determine the target of the branch via |
| 1641 | simple examination of instructions and/or data (consider |
| 1642 | that the address in the plabel may be the address of the |
| 1643 | bind-on-reference routine in the dynamic loader). |
| 1644 | |
| 1645 | So we have try an alternative approach. |
| 1646 | |
| 1647 | Get the name of the symbol at our current location; it should |
| 1648 | be a stub symbol with the same name as the symbol in the |
| 1649 | shared library. |
| 1650 | |
| 1651 | Then lookup a minimal symbol with the same name; we should |
| 1652 | get the minimal symbol for the target routine in the shared |
| 1653 | library as those take precedence of import/export stubs. */ |
| 1654 | if ((curr_inst == 0xe2a00000) || |
| 1655 | (curr_inst == 0xe2a00002) || |
| 1656 | (curr_inst == 0xeaa0d000) || |
| 1657 | (curr_inst == 0xeaa0d002)) |
| 1658 | { |
| 1659 | struct minimal_symbol *stubsym, *libsym; |
| 1660 | |
| 1661 | stubsym = lookup_minimal_symbol_by_pc (loc); |
| 1662 | if (stubsym == NULL) |
| 1663 | { |
| 1664 | warning ("Unable to find symbol for 0x%lx", loc); |
| 1665 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1666 | } |
| 1667 | |
| 1668 | libsym = lookup_minimal_symbol (DEPRECATED_SYMBOL_NAME (stubsym), NULL, NULL); |
| 1669 | if (libsym == NULL) |
| 1670 | { |
| 1671 | warning ("Unable to find library symbol for %s\n", |
| 1672 | DEPRECATED_SYMBOL_NAME (stubsym)); |
| 1673 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1674 | } |
| 1675 | |
| 1676 | return SYMBOL_VALUE (libsym); |
| 1677 | } |
| 1678 | |
| 1679 | /* Does it look like bl X,%rp or bl X,%r0? Another way to do a |
| 1680 | branch from the stub to the actual function. */ |
| 1681 | /*elz */ |
| 1682 | else if ((curr_inst & 0xffe0e000) == 0xe8400000 |
| 1683 | || (curr_inst & 0xffe0e000) == 0xe8000000 |
| 1684 | || (curr_inst & 0xffe0e000) == 0xe800A000) |
| 1685 | return (loc + extract_17 (curr_inst) + 8) & ~0x3; |
| 1686 | |
| 1687 | /* Does it look like bv (rp)? Note this depends on the |
| 1688 | current stack pointer being the same as the stack |
| 1689 | pointer in the stub itself! This is a branch on from the |
| 1690 | stub back to the original caller. */ |
| 1691 | /*else if ((curr_inst & 0xffe0e000) == 0xe840c000) */ |
| 1692 | else if ((curr_inst & 0xffe0f000) == 0xe840c000) |
| 1693 | { |
| 1694 | /* Yup. See if the previous instruction loaded |
| 1695 | rp from sp - 8. */ |
| 1696 | if (prev_inst == 0x4bc23ff1) |
| 1697 | return (read_memory_integer |
| 1698 | (read_register (HPPA_SP_REGNUM) - 8, 4)) & ~0x3; |
| 1699 | else |
| 1700 | { |
| 1701 | warning ("Unable to find restore of %%rp before bv (%%rp)."); |
| 1702 | return orig_pc == pc ? 0 : pc & ~0x3; |
| 1703 | } |
| 1704 | } |
| 1705 | |
| 1706 | /* elz: added this case to capture the new instruction |
| 1707 | at the end of the return part of an export stub used by |
| 1708 | the PA2.0: BVE, n (rp) */ |
| 1709 | else if ((curr_inst & 0xffe0f000) == 0xe840d000) |
| 1710 | { |
| 1711 | return (read_memory_integer |
| 1712 | (read_register (HPPA_SP_REGNUM) - 24, TARGET_PTR_BIT / 8)) & ~0x3; |
| 1713 | } |
| 1714 | |
| 1715 | /* What about be,n 0(sr0,%rp)? It's just another way we return to |
| 1716 | the original caller from the stub. Used in dynamic executables. */ |
| 1717 | else if (curr_inst == 0xe0400002) |
| 1718 | { |
| 1719 | /* The value we jump to is sitting in sp - 24. But that's |
| 1720 | loaded several instructions before the be instruction. |
| 1721 | I guess we could check for the previous instruction being |
| 1722 | mtsp %r1,%sr0 if we want to do sanity checking. */ |
| 1723 | return (read_memory_integer |
| 1724 | (read_register (HPPA_SP_REGNUM) - 24, TARGET_PTR_BIT / 8)) & ~0x3; |
| 1725 | } |
| 1726 | |
| 1727 | /* Haven't found the branch yet, but we're still in the stub. |
| 1728 | Keep looking. */ |
| 1729 | loc += 4; |
| 1730 | } |
| 1731 | } |
| 1732 | |
| 1733 | |
/* For the given instruction (INST), return any adjustment it makes
   to the stack pointer or zero for no adjustment.

   This only handles instructions commonly found in prologues.

   NOTE: this routine keeps state across calls (see save_high21 below),
   so it must be called on prologue instructions in order.  */

static int
prologue_inst_adjust_sp (unsigned long inst)
{
  /* This must persist across calls: an addil/ldo pair adjusts the
     stack in two instructions, and the high part seen in the first
     call is needed when the second instruction is examined.  */
  static int save_high21;

  /* The most common way to perform a stack adjustment ldo X(sp),sp */
  if ((inst & 0xffffc000) == 0x37de0000)
    return extract_14 (inst);

  /* stwm X,D(sp) -- store word and modify sp by displacement D.  */
  if ((inst & 0xffe00000) == 0x6fc00000)
    return extract_14 (inst);

  /* std,ma X,D(sp) -- the displacement is encoded differently from
     the 14-bit forms: sign bit in bit 0, magnitude in bits 4-13,
     scaled by 8.  */
  if ((inst & 0xffe00008) == 0x73c00008)
    return (inst & 0x1 ? -1 << 13 : 0) | (((inst >> 4) & 0x3ff) << 3);

  /* addil high21,%r1; ldo low11,(%r1),%r30)
     save high bits in save_high21 for later use.  */
  if ((inst & 0xffe00000) == 0x28200000)
    {
      save_high21 = extract_21 (inst);
      return 0;
    }

  /* ldo X(%r1),%r30 -- second half of the addil/ldo pair; combine
     with the high bits remembered from the previous call.  */
  if ((inst & 0xffff0000) == 0x343e0000)
    return save_high21 + extract_14 (inst);

  /* fstws as used by the HP compilers.  */
  if ((inst & 0xffffffe0) == 0x2fd01220)
    return extract_5_load (inst);

  /* No adjustment.  */
  return 0;
}
| 1775 | |
/* Return nonzero if INST is a branch of some kind, else return zero.
   The test is purely on the major opcode (bits 0-5) of INST.  */

static int
is_branch (unsigned long inst)
{
  unsigned long opcode = inst >> 26;

  /* Major opcodes used by the PA branch/compare-and-branch families:
     0x20-0x23, 0x27-0x2b, 0x2f-0x33 and 0x38-0x3b (with gaps exactly
     where the original opcode list had them).  */
  return ((opcode >= 0x20 && opcode <= 0x23)
	  || opcode == 0x27
	  || (opcode >= 0x28 && opcode <= 0x2b)
	  || opcode == 0x2f
	  || (opcode >= 0x30 && opcode <= 0x33)
	  || (opcode >= 0x38 && opcode <= 0x3b));
}
| 1807 | |
/* Return the register number for a GR which is saved by INST or
   zero if INST does not save a GR.  */

static int
inst_saves_gr (unsigned long inst)
{
  /* Does it look like a stw?  Major opcodes 0x1a (stw), 0x1b (stwm),
     or 0x1f.
     NOTE(review): the bare 0x1f test makes the following
     (0x1f && sub-opcode 0xa) clause unreachable; kept as-is to
     preserve existing behavior -- confirm against the PA opcode map
     before tightening.  */
  if ((inst >> 26) == 0x1a || (inst >> 26) == 0x1b
      || (inst >> 26) == 0x1f
      || ((inst >> 26) == 0x1f
	  && ((inst >> 6) == 0xa)))
    return extract_5R_store (inst);

  /* Does it look like a std?  Major opcode 0x1c, or major opcode 0x03
     with sub-opcode 0xb.  */
  if ((inst >> 26) == 0x1c
      || ((inst >> 26) == 0x03
	  && ((inst >> 6) & 0xf) == 0xb))
    return extract_5R_store (inst);

  /* Does it look like a stwm?  GCC & HPC may use this in prologues.
     (Redundant with the 0x1b test above, but harmless.)  */
  if ((inst >> 26) == 0x1b)
    return extract_5R_store (inst);

  /* Does it look like sth or stb?  HPC versions 9.0 and later use these
     too.  Major opcodes 0x19 (sth) and 0x18 (stb), or major opcode 0x3
     with sub-opcode 0x8 (stb) or 0x9 (sth).

     Bug fix: the old code had a misplaced closing parenthesis which
     compared the whole OR expression against 0x9 -- since an OR yields
     only 0 or 1 that comparison was always false, so the sub-opcode
     0x9 form could never match.  */
  if ((inst >> 26) == 0x19 || (inst >> 26) == 0x18
      || ((inst >> 26) == 0x3
	  && (((inst >> 6) & 0xf) == 0x8
	      || ((inst >> 6) & 0xf) == 0x9)))
    return extract_5R_store (inst);

  return 0;
}
| 1841 | |
/* Return the register number for a FR which is saved by INST or
   zero if INST does not save a FR.

   Note we only care about full 64bit register stores (that's the only
   kind of stores the prologue will use).

   FIXME: What about argument stores with the HP compiler in ANSI mode? */

static int
inst_saves_fr (unsigned long inst)
{
  /* is this an FSTD?  Coprocessor doubleword-store form; note this
     form encodes the source register where extract_5r_store looks.  */
  if ((inst & 0xfc00dfc0) == 0x2c001200)
    return extract_5r_store (inst);
  /* FSTD, the load/store-doubleword form (different field layout, so
     extract_5R_store is used here instead).  */
  if ((inst & 0xfc000002) == 0x70000002)
    return extract_5R_store (inst);
  /* is this an FSTW?  Coprocessor word-store form.  */
  if ((inst & 0xfc00df80) == 0x24001200)
    return extract_5r_store (inst);
  /* FSTW, the load/store-word form.  */
  if ((inst & 0xfc000002) == 0x7c000000)
    return extract_5R_store (inst);
  /* Not a floating-point register store.  */
  return 0;
}
| 1865 | |
/* Advance PC across any function entry prologue instructions
   to reach some "real" code.

   Use information in the unwind table to determine what exactly should
   be in the prologue.  */


CORE_ADDR
skip_prologue_hard_way (CORE_ADDR pc)
{
  char buf[4];
  CORE_ADDR orig_pc = pc;
  unsigned long inst, stack_remaining, save_gr, save_fr, save_rp, save_sp;
  unsigned long args_stored, status, i, restart_gr, restart_fr;
  struct unwind_table_entry *u;

  /* Bitmasks of registers the unwind info claimed were saved but the
     scan never found; used to restart with reduced expectations.  */
  restart_gr = 0;
  restart_fr = 0;

restart:
  u = find_unwind_entry (pc);
  if (!u)
    return pc;

  /* If we are not at the beginning of a function, then return now.  */
  if ((pc & ~0x3) != u->region_start)
    return pc;

  /* This is how much of a frame adjustment we need to account for
     (Total_frame_size is in 8-byte units).  */
  stack_remaining = u->Total_frame_size << 3;

  /* Magic register saves we want to know about.  */
  save_rp = u->Save_RP;
  save_sp = u->Save_SP;

  /* An indication that args may be stored into the stack.  Unfortunately
     the HPUX compilers tend to set this in cases where no args were
     stored too!  */
  args_stored = 1;

  /* Turn the Entry_GR field into a bitmask.  */
  save_gr = 0;
  for (i = 3; i < u->Entry_GR + 3; i++)
    {
      /* Frame pointer gets saved into a special location.  */
      if (u->Save_SP && i == HPPA_FP_REGNUM)
	continue;

      save_gr |= (1 << i);
    }
  save_gr &= ~restart_gr;

  /* Turn the Entry_FR field into a bitmask too.  */
  save_fr = 0;
  for (i = 12; i < u->Entry_FR + 12; i++)
    save_fr |= (1 << i);
  save_fr &= ~restart_fr;

  /* Loop until we find everything of interest or hit a branch.

     For unoptimized GCC code and for any HP CC code this will never ever
     examine any user instructions.

     For optimized GCC code we're faced with problems.  GCC will schedule
     its prologue and make prologue instructions available for delay slot
     filling.  The end result is user code gets mixed in with the prologue
     and a prologue instruction may be in the delay slot of the first branch
     or call.

     Some unexpected things are expected with debugging optimized code, so
     we allow this routine to walk past user instructions in optimized
     GCC code.  */
  while (save_gr || save_fr || save_rp || save_sp || stack_remaining > 0
	 || args_stored)
    {
      unsigned int reg_num;
      unsigned long old_stack_remaining, old_save_gr, old_save_fr;
      unsigned long old_save_rp, old_save_sp, next_inst;

      /* Save copies of all the triggers so we can compare them later
         (only for HPC).  */
      old_save_gr = save_gr;
      old_save_fr = save_fr;
      old_save_rp = save_rp;
      old_save_sp = save_sp;
      old_stack_remaining = stack_remaining;

      /* NOTE(review): buf is decoded before status is checked; harmless
         since a failed read bails out below before inst is used.  */
      status = target_read_memory (pc, buf, 4);
      inst = extract_unsigned_integer (buf, 4);

      /* Yow! */
      if (status != 0)
	return pc;

      /* Note the interesting effects of this instruction.  */
      stack_remaining -= prologue_inst_adjust_sp (inst);

      /* There are limited ways to store the return pointer into the
         stack: stw rp,-20(sp) or std rp,-16(sp).  */
      if (inst == 0x6bc23fd9 || inst == 0x0fc212c1)
	save_rp = 0;

      /* These are the only ways we save SP into the stack.  At this time
         the HP compilers never bother to save SP into the stack.  */
      if ((inst & 0xffffc000) == 0x6fc10000
	  || (inst & 0xffffc00c) == 0x73c10008)
	save_sp = 0;

      /* Are we loading some register with an offset from the argument
         pointer?  */
      if ((inst & 0xffe00000) == 0x37a00000
	  || (inst & 0xffffffe0) == 0x081d0240)
	{
	  pc += 4;
	  continue;
	}

      /* Account for general and floating-point register saves.  */
      reg_num = inst_saves_gr (inst);
      save_gr &= ~(1 << reg_num);

      /* Ugh.  Also account for argument stores into the stack.
         Unfortunately args_stored only tells us that some arguments
         were stored into the stack.  Not how many or what kind!

         This is a kludge as the HP compiler sets this bit and it
         never does prologue scheduling.  So once we see one, skip past
         all of them.  We have similar code for the fp arg stores below.

         FIXME.  Can still die if we have a mix of GR and FR argument
         stores!  */
      if (reg_num >= (TARGET_PTR_BIT == 64 ? 19 : 23) && reg_num <= 26)
	{
	  while (reg_num >= (TARGET_PTR_BIT == 64 ? 19 : 23) && reg_num <= 26)
	    {
	      pc += 4;
	      status = target_read_memory (pc, buf, 4);
	      inst = extract_unsigned_integer (buf, 4);
	      if (status != 0)
		return pc;
	      reg_num = inst_saves_gr (inst);
	    }
	  args_stored = 0;
	  continue;
	}

      reg_num = inst_saves_fr (inst);
      save_fr &= ~(1 << reg_num);

      status = target_read_memory (pc + 4, buf, 4);
      next_inst = extract_unsigned_integer (buf, 4);

      /* Yow! */
      if (status != 0)
	return pc;

      /* We've got to be ready to handle the ldo before the fp register
         save.  */
      if ((inst & 0xfc000000) == 0x34000000
	  && inst_saves_fr (next_inst) >= 4
	  && inst_saves_fr (next_inst) <= (TARGET_PTR_BIT == 64 ? 11 : 7))
	{
	  /* So we drop into the code below in a reasonable state.  */
	  reg_num = inst_saves_fr (next_inst);
	  pc -= 4;
	}

      /* Ugh.  Also account for argument stores into the stack.
         This is a kludge as the HP compiler sets this bit and it
         never does prologue scheduling.  So once we see one, skip past
         all of them.  */
      if (reg_num >= 4 && reg_num <= (TARGET_PTR_BIT == 64 ? 11 : 7))
	{
	  while (reg_num >= 4 && reg_num <= (TARGET_PTR_BIT == 64 ? 11 : 7))
	    {
	      pc += 8;
	      status = target_read_memory (pc, buf, 4);
	      inst = extract_unsigned_integer (buf, 4);
	      if (status != 0)
		return pc;
	      if ((inst & 0xfc000000) != 0x34000000)
		break;
	      status = target_read_memory (pc + 4, buf, 4);
	      next_inst = extract_unsigned_integer (buf, 4);
	      if (status != 0)
		return pc;
	      reg_num = inst_saves_fr (next_inst);
	    }
	  args_stored = 0;
	  continue;
	}

      /* Quit if we hit any kind of branch.  This can happen if a prologue
         instruction is in the delay slot of the first call/branch.  */
      if (is_branch (inst))
	break;

      /* What a crock.  The HP compilers set args_stored even if no
         arguments were stored into the stack (boo hiss).  This could
         cause this code to then skip a bunch of user insns (up to the
         first branch).

         To combat this we try to identify when args_stored was bogusly
         set and clear it.  We only do this when args_stored is nonzero,
         all other resources are accounted for, and nothing changed on
         this pass.  */
      if (args_stored
	  && !(save_gr || save_fr || save_rp || save_sp || stack_remaining > 0)
	  && old_save_gr == save_gr && old_save_fr == save_fr
	  && old_save_rp == save_rp && old_save_sp == save_sp
	  && old_stack_remaining == stack_remaining)
	break;

      /* Bump the PC.  */
      pc += 4;
    }

  /* We've got a tentative location for the end of the prologue.  However
     because of limitations in the unwind descriptor mechanism we may
     have gone too far into user code looking for the save of a register
     that does not exist.  So, if there are registers we expected to be
     saved but never were, mask them out and restart.

     This should only happen in optimized code, and should be very rare.  */
  if (save_gr || (save_fr && !(restart_fr || restart_gr)))
    {
      pc = orig_pc;
      restart_gr = save_gr;
      restart_fr = save_fr;
      goto restart;
    }

  return pc;
}
| 2100 | |
| 2101 | |
| 2102 | /* Return the address of the PC after the last prologue instruction if |
| 2103 | we can determine it from the debug symbols. Else return zero. */ |
| 2104 | |
| 2105 | static CORE_ADDR |
| 2106 | after_prologue (CORE_ADDR pc) |
| 2107 | { |
| 2108 | struct symtab_and_line sal; |
| 2109 | CORE_ADDR func_addr, func_end; |
| 2110 | struct symbol *f; |
| 2111 | |
| 2112 | /* If we can not find the symbol in the partial symbol table, then |
| 2113 | there is no hope we can determine the function's start address |
| 2114 | with this code. */ |
| 2115 | if (!find_pc_partial_function (pc, NULL, &func_addr, &func_end)) |
| 2116 | return 0; |
| 2117 | |
| 2118 | /* Get the line associated with FUNC_ADDR. */ |
| 2119 | sal = find_pc_line (func_addr, 0); |
| 2120 | |
| 2121 | /* There are only two cases to consider. First, the end of the source line |
| 2122 | is within the function bounds. In that case we return the end of the |
| 2123 | source line. Second is the end of the source line extends beyond the |
| 2124 | bounds of the current function. We need to use the slow code to |
| 2125 | examine instructions in that case. |
| 2126 | |
| 2127 | Anything else is simply a bug elsewhere. Fixing it here is absolutely |
| 2128 | the wrong thing to do. In fact, it should be entirely possible for this |
| 2129 | function to always return zero since the slow instruction scanning code |
| 2130 | is supposed to *always* work. If it does not, then it is a bug. */ |
| 2131 | if (sal.end < func_end) |
| 2132 | return sal.end; |
| 2133 | else |
| 2134 | return 0; |
| 2135 | } |
| 2136 | |
| 2137 | /* To skip prologues, I use this predicate. Returns either PC itself |
| 2138 | if the code at PC does not look like a function prologue; otherwise |
| 2139 | returns an address that (if we're lucky) follows the prologue. If |
| 2140 | LENIENT, then we must skip everything which is involved in setting |
| 2141 | up the frame (it's OK to skip more, just so long as we don't skip |
| 2142 | anything which might clobber the registers which are being saved. |
| 2143 | Currently we must not skip more on the alpha, but we might the lenient |
| 2144 | stuff some day. */ |
| 2145 | |
| 2146 | CORE_ADDR |
| 2147 | hppa_skip_prologue (CORE_ADDR pc) |
| 2148 | { |
| 2149 | unsigned long inst; |
| 2150 | int offset; |
| 2151 | CORE_ADDR post_prologue_pc; |
| 2152 | char buf[4]; |
| 2153 | |
| 2154 | /* See if we can determine the end of the prologue via the symbol table. |
| 2155 | If so, then return either PC, or the PC after the prologue, whichever |
| 2156 | is greater. */ |
| 2157 | |
| 2158 | post_prologue_pc = after_prologue (pc); |
| 2159 | |
| 2160 | /* If after_prologue returned a useful address, then use it. Else |
| 2161 | fall back on the instruction skipping code. |
| 2162 | |
| 2163 | Some folks have claimed this causes problems because the breakpoint |
| 2164 | may be the first instruction of the prologue. If that happens, then |
| 2165 | the instruction skipping code has a bug that needs to be fixed. */ |
| 2166 | if (post_prologue_pc != 0) |
| 2167 | return max (pc, post_prologue_pc); |
| 2168 | else |
| 2169 | return (skip_prologue_hard_way (pc)); |
| 2170 | } |
| 2171 | |
/* Per-frame cache built by hppa_frame_cache and shared by the
   this_id/prev_register/base callbacks below.  */
struct hppa_frame_cache
{
  CORE_ADDR base;			/* Frame base: value of %sp at entry.  */
  struct trad_frame_saved_reg *saved_regs;  /* Where each register was saved.  */
};
| 2177 | |
| 2178 | static struct hppa_frame_cache * |
| 2179 | hppa_frame_cache (struct frame_info *next_frame, void **this_cache) |
| 2180 | { |
| 2181 | struct hppa_frame_cache *cache; |
| 2182 | long saved_gr_mask; |
| 2183 | long saved_fr_mask; |
| 2184 | CORE_ADDR this_sp; |
| 2185 | long frame_size; |
| 2186 | struct unwind_table_entry *u; |
| 2187 | int i; |
| 2188 | |
| 2189 | if ((*this_cache) != NULL) |
| 2190 | return (*this_cache); |
| 2191 | cache = FRAME_OBSTACK_ZALLOC (struct hppa_frame_cache); |
| 2192 | (*this_cache) = cache; |
| 2193 | cache->saved_regs = trad_frame_alloc_saved_regs (next_frame); |
| 2194 | |
| 2195 | /* Yow! */ |
| 2196 | u = find_unwind_entry (frame_func_unwind (next_frame)); |
| 2197 | if (!u) |
| 2198 | return (*this_cache); |
| 2199 | |
| 2200 | /* Turn the Entry_GR field into a bitmask. */ |
| 2201 | saved_gr_mask = 0; |
| 2202 | for (i = 3; i < u->Entry_GR + 3; i++) |
| 2203 | { |
| 2204 | /* Frame pointer gets saved into a special location. */ |
| 2205 | if (u->Save_SP && i == HPPA_FP_REGNUM) |
| 2206 | continue; |
| 2207 | |
| 2208 | saved_gr_mask |= (1 << i); |
| 2209 | } |
| 2210 | |
| 2211 | /* Turn the Entry_FR field into a bitmask too. */ |
| 2212 | saved_fr_mask = 0; |
| 2213 | for (i = 12; i < u->Entry_FR + 12; i++) |
| 2214 | saved_fr_mask |= (1 << i); |
| 2215 | |
| 2216 | /* Loop until we find everything of interest or hit a branch. |
| 2217 | |
| 2218 | For unoptimized GCC code and for any HP CC code this will never ever |
| 2219 | examine any user instructions. |
| 2220 | |
| 2221 | For optimized GCC code we're faced with problems. GCC will schedule |
| 2222 | its prologue and make prologue instructions available for delay slot |
| 2223 | filling. The end result is user code gets mixed in with the prologue |
| 2224 | and a prologue instruction may be in the delay slot of the first branch |
| 2225 | or call. |
| 2226 | |
| 2227 | Some unexpected things are expected with debugging optimized code, so |
| 2228 | we allow this routine to walk past user instructions in optimized |
| 2229 | GCC code. */ |
| 2230 | { |
| 2231 | int final_iteration = 0; |
| 2232 | CORE_ADDR pc; |
| 2233 | CORE_ADDR end_pc = skip_prologue_using_sal (pc); |
| 2234 | int looking_for_sp = u->Save_SP; |
| 2235 | int looking_for_rp = u->Save_RP; |
| 2236 | int fp_loc = -1; |
| 2237 | if (end_pc == 0) |
| 2238 | end_pc = frame_pc_unwind (next_frame); |
| 2239 | frame_size = 0; |
| 2240 | for (pc = frame_func_unwind (next_frame); |
| 2241 | ((saved_gr_mask || saved_fr_mask |
| 2242 | || looking_for_sp || looking_for_rp |
| 2243 | || frame_size < (u->Total_frame_size << 3)) |
| 2244 | && pc <= end_pc); |
| 2245 | pc += 4) |
| 2246 | { |
| 2247 | int reg; |
| 2248 | char buf4[4]; |
| 2249 | long status = target_read_memory (pc, buf4, sizeof buf4); |
| 2250 | long inst = extract_unsigned_integer (buf4, sizeof buf4); |
| 2251 | |
| 2252 | /* Note the interesting effects of this instruction. */ |
| 2253 | frame_size += prologue_inst_adjust_sp (inst); |
| 2254 | |
| 2255 | /* There are limited ways to store the return pointer into the |
| 2256 | stack. */ |
| 2257 | if (inst == 0x6bc23fd9) /* stw rp,-0x14(sr0,sp) */ |
| 2258 | { |
| 2259 | looking_for_rp = 0; |
| 2260 | cache->saved_regs[RP_REGNUM].addr = -20; |
| 2261 | } |
| 2262 | else if (inst == 0x0fc212c1) /* std rp,-0x10(sr0,sp) */ |
| 2263 | { |
| 2264 | looking_for_rp = 0; |
| 2265 | cache->saved_regs[RP_REGNUM].addr = -16; |
| 2266 | } |
| 2267 | |
| 2268 | /* Check to see if we saved SP into the stack. This also |
| 2269 | happens to indicate the location of the saved frame |
| 2270 | pointer. */ |
| 2271 | if ((inst & 0xffffc000) == 0x6fc10000 /* stw,ma r1,N(sr0,sp) */ |
| 2272 | || (inst & 0xffffc00c) == 0x73c10008) /* std,ma r1,N(sr0,sp) */ |
| 2273 | { |
| 2274 | looking_for_sp = 0; |
| 2275 | cache->saved_regs[HPPA_FP_REGNUM].addr = 0; |
| 2276 | } |
| 2277 | |
| 2278 | /* Account for general and floating-point register saves. */ |
| 2279 | reg = inst_saves_gr (inst); |
| 2280 | if (reg >= 3 && reg <= 18 |
| 2281 | && (!u->Save_SP || reg != HPPA_FP_REGNUM)) |
| 2282 | { |
| 2283 | saved_gr_mask &= ~(1 << reg); |
| 2284 | if ((inst >> 26) == 0x1b && extract_14 (inst) >= 0) |
| 2285 | /* stwm with a positive displacement is a _post_ |
| 2286 | _modify_. */ |
| 2287 | cache->saved_regs[reg].addr = 0; |
| 2288 | else if ((inst & 0xfc00000c) == 0x70000008) |
| 2289 | /* A std has explicit post_modify forms. */ |
| 2290 | cache->saved_regs[reg].addr = 0; |
| 2291 | else |
| 2292 | { |
| 2293 | CORE_ADDR offset; |
| 2294 | |
| 2295 | if ((inst >> 26) == 0x1c) |
| 2296 | offset = (inst & 0x1 ? -1 << 13 : 0) | (((inst >> 4) & 0x3ff) << 3); |
| 2297 | else if ((inst >> 26) == 0x03) |
| 2298 | offset = low_sign_extend (inst & 0x1f, 5); |
| 2299 | else |
| 2300 | offset = extract_14 (inst); |
| 2301 | |
| 2302 | /* Handle code with and without frame pointers. */ |
| 2303 | if (u->Save_SP) |
| 2304 | cache->saved_regs[reg].addr = offset; |
| 2305 | else |
| 2306 | cache->saved_regs[reg].addr = (u->Total_frame_size << 3) + offset; |
| 2307 | } |
| 2308 | } |
| 2309 | |
| 2310 | /* GCC handles callee saved FP regs a little differently. |
| 2311 | |
| 2312 | It emits an instruction to put the value of the start of |
| 2313 | the FP store area into %r1. It then uses fstds,ma with a |
| 2314 | basereg of %r1 for the stores. |
| 2315 | |
| 2316 | HP CC emits them at the current stack pointer modifying the |
| 2317 | stack pointer as it stores each register. */ |
| 2318 | |
| 2319 | /* ldo X(%r3),%r1 or ldo X(%r30),%r1. */ |
| 2320 | if ((inst & 0xffffc000) == 0x34610000 |
| 2321 | || (inst & 0xffffc000) == 0x37c10000) |
| 2322 | fp_loc = extract_14 (inst); |
| 2323 | |
| 2324 | reg = inst_saves_fr (inst); |
| 2325 | if (reg >= 12 && reg <= 21) |
| 2326 | { |
| 2327 | /* Note +4 braindamage below is necessary because the FP |
| 2328 | status registers are internally 8 registers rather than |
| 2329 | the expected 4 registers. */ |
| 2330 | saved_fr_mask &= ~(1 << reg); |
| 2331 | if (fp_loc == -1) |
| 2332 | { |
| 2333 | /* 1st HP CC FP register store. After this |
| 2334 | instruction we've set enough state that the GCC and |
| 2335 | HPCC code are both handled in the same manner. */ |
| 2336 | cache->saved_regs[reg + FP4_REGNUM + 4].addr = 0; |
| 2337 | fp_loc = 8; |
| 2338 | } |
| 2339 | else |
| 2340 | { |
| 2341 | cache->saved_regs[reg + HPPA_FP0_REGNUM + 4].addr = fp_loc; |
| 2342 | fp_loc += 8; |
| 2343 | } |
| 2344 | } |
| 2345 | |
| 2346 | /* Quit if we hit any kind of branch the previous iteration. */ |
| 2347 | if (final_iteration) |
| 2348 | break; |
| 2349 | /* We want to look precisely one instruction beyond the branch |
| 2350 | if we have not found everything yet. */ |
| 2351 | if (is_branch (inst)) |
| 2352 | final_iteration = 1; |
| 2353 | } |
| 2354 | } |
| 2355 | |
| 2356 | { |
| 2357 | /* The frame base always represents the value of %sp at entry to |
| 2358 | the current function (and is thus equivalent to the "saved" |
| 2359 | stack pointer. */ |
| 2360 | CORE_ADDR this_sp = frame_unwind_register_unsigned (next_frame, HPPA_SP_REGNUM); |
| 2361 | /* FIXME: cagney/2004-02-22: This assumes that the frame has been |
| 2362 | created. If it hasn't everything will be out-of-wack. */ |
| 2363 | if (u->Save_SP && trad_frame_addr_p (cache->saved_regs, HPPA_SP_REGNUM)) |
| 2364 | /* Both we're expecting the SP to be saved and the SP has been |
| 2365 | saved. The entry SP value is saved at this frame's SP |
| 2366 | address. */ |
| 2367 | cache->base = read_memory_integer (this_sp, TARGET_PTR_BIT / 8); |
| 2368 | else |
| 2369 | /* The prologue has been slowly allocating stack space. Adjust |
| 2370 | the SP back. */ |
| 2371 | cache->base = this_sp - frame_size; |
| 2372 | trad_frame_set_value (cache->saved_regs, HPPA_SP_REGNUM, cache->base); |
| 2373 | } |
| 2374 | |
| 2375 | /* The PC is found in the "return register", "Millicode" uses "r31" |
| 2376 | as the return register while normal code uses "rp". */ |
| 2377 | if (u->Millicode) |
| 2378 | cache->saved_regs[PCOQ_HEAD_REGNUM] = cache->saved_regs[31]; |
| 2379 | else |
| 2380 | cache->saved_regs[PCOQ_HEAD_REGNUM] = cache->saved_regs[RP_REGNUM]; |
| 2381 | |
| 2382 | { |
| 2383 | /* Convert all the offsets into addresses. */ |
| 2384 | int reg; |
| 2385 | for (reg = 0; reg < NUM_REGS; reg++) |
| 2386 | { |
| 2387 | if (trad_frame_addr_p (cache->saved_regs, reg)) |
| 2388 | cache->saved_regs[reg].addr += cache->base; |
| 2389 | } |
| 2390 | } |
| 2391 | |
| 2392 | return (*this_cache); |
| 2393 | } |
| 2394 | |
| 2395 | static void |
| 2396 | hppa_frame_this_id (struct frame_info *next_frame, void **this_cache, |
| 2397 | struct frame_id *this_id) |
| 2398 | { |
| 2399 | struct hppa_frame_cache *info = hppa_frame_cache (next_frame, this_cache); |
| 2400 | (*this_id) = frame_id_build (info->base, frame_func_unwind (next_frame)); |
| 2401 | } |
| 2402 | |
/* frame_unwind prev_register callback: fetch REGNUM's value in the
   frame previous to NEXT_FRAME, reporting through the standard
   optimized/lval/addr/realnum/value out parameters.  */

static void
hppa_frame_prev_register (struct frame_info *next_frame,
			  void **this_cache,
			  int regnum, int *optimizedp,
			  enum lval_type *lvalp, CORE_ADDR *addrp,
			  int *realnump, void *valuep)
{
  struct hppa_frame_cache *info = hppa_frame_cache (next_frame, this_cache);
  struct gdbarch *gdbarch = get_frame_arch (next_frame);
  if (regnum == PCOQ_TAIL_REGNUM)
    {
      /* The PCOQ TAIL, or NPC, needs to be computed from the unwound
         PC register: it is synthesized as head + 4, so it has no
         location of its own (not_lval, no address, no raw regnum).  */
      *optimizedp = 0;
      *lvalp = not_lval;
      *addrp = 0;
      *realnump = 0;
      if (valuep)
	{
	  int regsize = register_size (gdbarch, PCOQ_HEAD_REGNUM);
	  CORE_ADDR pc;
	  int optimized;
	  enum lval_type lval;
	  CORE_ADDR addr;
	  int realnum;
	  bfd_byte value[MAX_REGISTER_SIZE];
	  /* Unwind the head of the PC queue, then hand back that
	     value plus 4 as the tail.  */
	  trad_frame_prev_register (next_frame, info->saved_regs,
				    PCOQ_HEAD_REGNUM, &optimized, &lval, &addr,
				    &realnum, &value);
	  /* NOTE(review): `&value' is a pointer-to-array; it has the
	     same address as `value' so this works, but plain `value'
	     would be the conventional spelling.  */
	  pc = extract_unsigned_integer (&value, regsize);
	  store_unsigned_integer (valuep, regsize, pc + 4);
	}
    }
  else
    {
      /* Every other register comes straight from the saved-register
         table built by hppa_frame_cache.  */
      trad_frame_prev_register (next_frame, info->saved_regs, regnum,
				optimizedp, lvalp, addrp, realnump, valuep);
    }
}
| 2442 | |
/* The hppa unwinder for normal frames, wiring the two callbacks above
   into GDB's frame-unwind machinery.  */
static const struct frame_unwind hppa_frame_unwind =
{
  NORMAL_FRAME,
  hppa_frame_this_id,
  hppa_frame_prev_register
};
| 2449 | |
/* Unwind sniffer: this unwinder handles every frame unconditionally.  */
static const struct frame_unwind *
hppa_frame_unwind_sniffer (struct frame_info *next_frame)
{
  return &hppa_frame_unwind;
}
| 2455 | |
| 2456 | static CORE_ADDR |
| 2457 | hppa_frame_base_address (struct frame_info *next_frame, |
| 2458 | void **this_cache) |
| 2459 | { |
| 2460 | struct hppa_frame_cache *info = hppa_frame_cache (next_frame, |
| 2461 | this_cache); |
| 2462 | return info->base; |
| 2463 | } |
| 2464 | |
/* Frame-base descriptor: frame base, locals base and args base all
   coincide with the entry stack pointer on hppa.  */
static const struct frame_base hppa_frame_base = {
  &hppa_frame_unwind,
  hppa_frame_base_address,
  hppa_frame_base_address,
  hppa_frame_base_address
};
| 2471 | |
/* Frame-base sniffer: handles every frame unconditionally.  */
static const struct frame_base *
hppa_frame_base_sniffer (struct frame_info *next_frame)
{
  return &hppa_frame_base;
}
| 2477 | |
| 2478 | static struct frame_id |
| 2479 | hppa_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame) |
| 2480 | { |
| 2481 | return frame_id_build (frame_unwind_register_unsigned (next_frame, |
| 2482 | HPPA_SP_REGNUM), |
| 2483 | frame_pc_unwind (next_frame)); |
| 2484 | } |
| 2485 | |
| 2486 | static CORE_ADDR |
| 2487 | hppa_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame) |
| 2488 | { |
| 2489 | return frame_unwind_register_signed (next_frame, PCOQ_HEAD_REGNUM) & ~3; |
| 2490 | } |
| 2491 | |
| 2492 | /* Exception handling support for the HP-UX ANSI C++ compiler. |
| 2493 | The compiler (aCC) provides a callback for exception events; |
| 2494 | GDB can set a breakpoint on this callback and find out what |
| 2495 | exception event has occurred. */ |
| 2496 | |
/* The name of the hook to be set to point to the callback function;
   it lives in the aCC runtime support library (libCsup).  */
static char HP_ACC_EH_notify_hook[] = "__eh_notify_hook";
/* The name of the function to be used to set the hook value */
static char HP_ACC_EH_set_hook_value[] = "__eh_set_hook_value";
/* The name of the callback function in end.o */
static char HP_ACC_EH_notify_callback[] = "__d_eh_notify_callback";
/* Name of function in end.o on which a break is set (called by above) */
static char HP_ACC_EH_break[] = "__d_eh_break";
/* Name of flag (in end.o) that enables catching throws */
static char HP_ACC_EH_catch_throw[] = "__d_eh_catch_throw";
/* Name of flag (in end.o) that enables catching catches */
static char HP_ACC_EH_catch_catch[] = "__d_eh_catch_catch";
/* The event-kind enum used by aCC's runtime callback */
typedef enum
  {
    __EH_NOTIFY_THROW,
    __EH_NOTIFY_CATCH
  }
__eh_notification;

/* Is exception-handling support available with this executable?
   0 = absent/unknown, 1 = partially located, 2 = fully initialized
   (see initialize_hp_cxx_exception_support).  */
static int hp_cxx_exception_support = 0;
/* Has the initialize function been run? */
int hp_cxx_exception_support_initialized = 0;
/* Similar to above, but imported from breakpoint.c -- non-target-specific */
extern int exception_support_initialized;
/* Address of __eh_notify_hook */
static CORE_ADDR eh_notify_hook_addr = 0;
/* Address of __d_eh_notify_callback */
static CORE_ADDR eh_notify_callback_addr = 0;
/* Address of __d_eh_break */
static CORE_ADDR eh_break_addr = 0;
/* Address of __d_eh_catch_catch */
static CORE_ADDR eh_catch_catch_addr = 0;
/* Address of __d_eh_catch_throw */
static CORE_ADDR eh_catch_throw_addr = 0;
/* Sal for __d_eh_break; xmalloc'd once on first use and reused.  */
static struct symtab_and_line *break_callback_sal = 0;
| 2535 | |
| 2536 | /* Code in end.c expects __d_pid to be set in the inferior, |
| 2537 | otherwise __d_eh_notify_callback doesn't bother to call |
| 2538 | __d_eh_break! So we poke the pid into this symbol |
| 2539 | ourselves. |
| 2540 | 0 => success |
| 2541 | 1 => failure */ |
| 2542 | int |
| 2543 | setup_d_pid_in_inferior (void) |
| 2544 | { |
| 2545 | CORE_ADDR anaddr; |
| 2546 | struct minimal_symbol *msymbol; |
| 2547 | char buf[4]; /* FIXME 32x64? */ |
| 2548 | |
| 2549 | /* Slam the pid of the process into __d_pid; failing is only a warning! */ |
| 2550 | msymbol = lookup_minimal_symbol ("__d_pid", NULL, symfile_objfile); |
| 2551 | if (msymbol == NULL) |
| 2552 | { |
| 2553 | warning ("Unable to find __d_pid symbol in object file."); |
| 2554 | warning ("Suggest linking executable with -g (links in /opt/langtools/lib/end.o)."); |
| 2555 | return 1; |
| 2556 | } |
| 2557 | |
| 2558 | anaddr = SYMBOL_VALUE_ADDRESS (msymbol); |
| 2559 | store_unsigned_integer (buf, 4, PIDGET (inferior_ptid)); /* FIXME 32x64? */ |
| 2560 | if (target_write_memory (anaddr, buf, 4)) /* FIXME 32x64? */ |
| 2561 | { |
| 2562 | warning ("Unable to write __d_pid"); |
| 2563 | warning ("Suggest linking executable with -g (links in /opt/langtools/lib/end.o)."); |
| 2564 | return 1; |
| 2565 | } |
| 2566 | return 0; |
| 2567 | } |
| 2568 | |
| 2569 | /* Initialize exception catchpoint support by looking for the |
| 2570 | necessary hooks/callbacks in end.o, etc., and set the hook value to |
| 2571 | point to the required debug function |
| 2572 | |
| 2573 | Return 0 => failure |
| 2574 | 1 => success */ |
| 2575 | |
| 2576 | static int |
| 2577 | initialize_hp_cxx_exception_support (void) |
| 2578 | { |
| 2579 | struct symtabs_and_lines sals; |
| 2580 | struct cleanup *old_chain; |
| 2581 | struct cleanup *canonical_strings_chain = NULL; |
| 2582 | int i; |
| 2583 | char *addr_start; |
| 2584 | char *addr_end = NULL; |
| 2585 | char **canonical = (char **) NULL; |
| 2586 | int thread = -1; |
| 2587 | struct symbol *sym = NULL; |
| 2588 | struct minimal_symbol *msym = NULL; |
| 2589 | struct objfile *objfile; |
| 2590 | asection *shlib_info; |
| 2591 | |
| 2592 | /* Detect and disallow recursion. On HP-UX with aCC, infinite |
| 2593 | recursion is a possibility because finding the hook for exception |
| 2594 | callbacks involves making a call in the inferior, which means |
| 2595 | re-inserting breakpoints which can re-invoke this code */ |
| 2596 | |
| 2597 | static int recurse = 0; |
| 2598 | if (recurse > 0) |
| 2599 | { |
| 2600 | hp_cxx_exception_support_initialized = 0; |
| 2601 | exception_support_initialized = 0; |
| 2602 | return 0; |
| 2603 | } |
| 2604 | |
| 2605 | hp_cxx_exception_support = 0; |
| 2606 | |
| 2607 | /* First check if we have seen any HP compiled objects; if not, |
| 2608 | it is very unlikely that HP's idiosyncratic callback mechanism |
| 2609 | for exception handling debug support will be available! |
| 2610 | This will percolate back up to breakpoint.c, where our callers |
| 2611 | will decide to try the g++ exception-handling support instead. */ |
| 2612 | if (!hp_som_som_object_present) |
| 2613 | return 0; |
| 2614 | |
| 2615 | /* We have a SOM executable with SOM debug info; find the hooks */ |
| 2616 | |
| 2617 | /* First look for the notify hook provided by aCC runtime libs */ |
| 2618 | /* If we find this symbol, we conclude that the executable must |
| 2619 | have HP aCC exception support built in. If this symbol is not |
| 2620 | found, even though we're a HP SOM-SOM file, we may have been |
| 2621 | built with some other compiler (not aCC). This results percolates |
| 2622 | back up to our callers in breakpoint.c which can decide to |
| 2623 | try the g++ style of exception support instead. |
| 2624 | If this symbol is found but the other symbols we require are |
| 2625 | not found, there is something weird going on, and g++ support |
| 2626 | should *not* be tried as an alternative. |
| 2627 | |
| 2628 | ASSUMPTION: Only HP aCC code will have __eh_notify_hook defined. |
| 2629 | ASSUMPTION: HP aCC and g++ modules cannot be linked together. */ |
| 2630 | |
| 2631 | /* libCsup has this hook; it'll usually be non-debuggable */ |
| 2632 | msym = lookup_minimal_symbol (HP_ACC_EH_notify_hook, NULL, NULL); |
| 2633 | if (msym) |
| 2634 | { |
| 2635 | eh_notify_hook_addr = SYMBOL_VALUE_ADDRESS (msym); |
| 2636 | hp_cxx_exception_support = 1; |
| 2637 | } |
| 2638 | else |
| 2639 | { |
| 2640 | warning ("Unable to find exception callback hook (%s).", HP_ACC_EH_notify_hook); |
| 2641 | warning ("Executable may not have been compiled debuggable with HP aCC."); |
| 2642 | warning ("GDB will be unable to intercept exception events."); |
| 2643 | eh_notify_hook_addr = 0; |
| 2644 | hp_cxx_exception_support = 0; |
| 2645 | return 0; |
| 2646 | } |
| 2647 | |
| 2648 | /* Next look for the notify callback routine in end.o */ |
| 2649 | /* This is always available in the SOM symbol dictionary if end.o is linked in */ |
| 2650 | msym = lookup_minimal_symbol (HP_ACC_EH_notify_callback, NULL, NULL); |
| 2651 | if (msym) |
| 2652 | { |
| 2653 | eh_notify_callback_addr = SYMBOL_VALUE_ADDRESS (msym); |
| 2654 | hp_cxx_exception_support = 1; |
| 2655 | } |
| 2656 | else |
| 2657 | { |
| 2658 | warning ("Unable to find exception callback routine (%s).", HP_ACC_EH_notify_callback); |
| 2659 | warning ("Suggest linking executable with -g (links in /opt/langtools/lib/end.o)."); |
| 2660 | warning ("GDB will be unable to intercept exception events."); |
| 2661 | eh_notify_callback_addr = 0; |
| 2662 | return 0; |
| 2663 | } |
| 2664 | |
| 2665 | #ifndef GDB_TARGET_IS_HPPA_20W |
| 2666 | /* Check whether the executable is dynamically linked or archive bound */ |
| 2667 | /* With an archive-bound executable we can use the raw addresses we find |
| 2668 | for the callback function, etc. without modification. For an executable |
| 2669 | with shared libraries, we have to do more work to find the plabel, which |
| 2670 | can be the target of a call through $$dyncall from the aCC runtime support |
| 2671 | library (libCsup) which is linked shared by default by aCC. */ |
| 2672 | /* This test below was copied from somsolib.c/somread.c. It may not be a very |
| 2673 | reliable one to test that an executable is linked shared. pai/1997-07-18 */ |
| 2674 | shlib_info = bfd_get_section_by_name (symfile_objfile->obfd, "$SHLIB_INFO$"); |
| 2675 | if (shlib_info && (bfd_section_size (symfile_objfile->obfd, shlib_info) != 0)) |
| 2676 | { |
| 2677 | /* The minsym we have has the local code address, but that's not the |
| 2678 | plabel that can be used by an inter-load-module call. */ |
| 2679 | /* Find solib handle for main image (which has end.o), and use that |
| 2680 | and the min sym as arguments to __d_shl_get() (which does the equivalent |
| 2681 | of shl_findsym()) to find the plabel. */ |
| 2682 | |
| 2683 | args_for_find_stub args; |
| 2684 | static char message[] = "Error while finding exception callback hook:\n"; |
| 2685 | |
| 2686 | args.solib_handle = som_solib_get_solib_by_pc (eh_notify_callback_addr); |
| 2687 | args.msym = msym; |
| 2688 | args.return_val = 0; |
| 2689 | |
| 2690 | recurse++; |
| 2691 | catch_errors (cover_find_stub_with_shl_get, &args, message, |
| 2692 | RETURN_MASK_ALL); |
| 2693 | eh_notify_callback_addr = args.return_val; |
| 2694 | recurse--; |
| 2695 | |
| 2696 | exception_catchpoints_are_fragile = 1; |
| 2697 | |
| 2698 | if (!eh_notify_callback_addr) |
| 2699 | { |
| 2700 | /* We can get here either if there is no plabel in the export list |
| 2701 | for the main image, or if something strange happened (?) */ |
| 2702 | warning ("Couldn't find a plabel (indirect function label) for the exception callback."); |
| 2703 | warning ("GDB will not be able to intercept exception events."); |
| 2704 | return 0; |
| 2705 | } |
| 2706 | } |
| 2707 | else |
| 2708 | exception_catchpoints_are_fragile = 0; |
| 2709 | #endif |
| 2710 | |
| 2711 | /* Now, look for the breakpointable routine in end.o */ |
| 2712 | /* This should also be available in the SOM symbol dict. if end.o linked in */ |
| 2713 | msym = lookup_minimal_symbol (HP_ACC_EH_break, NULL, NULL); |
| 2714 | if (msym) |
| 2715 | { |
| 2716 | eh_break_addr = SYMBOL_VALUE_ADDRESS (msym); |
| 2717 | hp_cxx_exception_support = 1; |
| 2718 | } |
| 2719 | else |
| 2720 | { |
| 2721 | warning ("Unable to find exception callback routine to set breakpoint (%s).", HP_ACC_EH_break); |
| 2722 | warning ("Suggest linking executable with -g (link in /opt/langtools/lib/end.o)."); |
| 2723 | warning ("GDB will be unable to intercept exception events."); |
| 2724 | eh_break_addr = 0; |
| 2725 | return 0; |
| 2726 | } |
| 2727 | |
| 2728 | /* Next look for the catch enable flag provided in end.o */ |
| 2729 | sym = lookup_symbol (HP_ACC_EH_catch_catch, (struct block *) NULL, |
| 2730 | VAR_DOMAIN, 0, (struct symtab **) NULL); |
| 2731 | if (sym) /* sometimes present in debug info */ |
| 2732 | { |
| 2733 | eh_catch_catch_addr = SYMBOL_VALUE_ADDRESS (sym); |
| 2734 | hp_cxx_exception_support = 1; |
| 2735 | } |
| 2736 | else |
| 2737 | /* otherwise look in SOM symbol dict. */ |
| 2738 | { |
| 2739 | msym = lookup_minimal_symbol (HP_ACC_EH_catch_catch, NULL, NULL); |
| 2740 | if (msym) |
| 2741 | { |
| 2742 | eh_catch_catch_addr = SYMBOL_VALUE_ADDRESS (msym); |
| 2743 | hp_cxx_exception_support = 1; |
| 2744 | } |
| 2745 | else |
| 2746 | { |
| 2747 | warning ("Unable to enable interception of exception catches."); |
| 2748 | warning ("Executable may not have been compiled debuggable with HP aCC."); |
| 2749 | warning ("Suggest linking executable with -g (link in /opt/langtools/lib/end.o)."); |
| 2750 | return 0; |
| 2751 | } |
| 2752 | } |
| 2753 | |
| 2754 | /* Next look for the catch enable flag provided end.o */ |
| 2755 | sym = lookup_symbol (HP_ACC_EH_catch_catch, (struct block *) NULL, |
| 2756 | VAR_DOMAIN, 0, (struct symtab **) NULL); |
| 2757 | if (sym) /* sometimes present in debug info */ |
| 2758 | { |
| 2759 | eh_catch_throw_addr = SYMBOL_VALUE_ADDRESS (sym); |
| 2760 | hp_cxx_exception_support = 1; |
| 2761 | } |
| 2762 | else |
| 2763 | /* otherwise look in SOM symbol dict. */ |
| 2764 | { |
| 2765 | msym = lookup_minimal_symbol (HP_ACC_EH_catch_throw, NULL, NULL); |
| 2766 | if (msym) |
| 2767 | { |
| 2768 | eh_catch_throw_addr = SYMBOL_VALUE_ADDRESS (msym); |
| 2769 | hp_cxx_exception_support = 1; |
| 2770 | } |
| 2771 | else |
| 2772 | { |
| 2773 | warning ("Unable to enable interception of exception throws."); |
| 2774 | warning ("Executable may not have been compiled debuggable with HP aCC."); |
| 2775 | warning ("Suggest linking executable with -g (link in /opt/langtools/lib/end.o)."); |
| 2776 | return 0; |
| 2777 | } |
| 2778 | } |
| 2779 | |
| 2780 | /* Set the flags */ |
| 2781 | hp_cxx_exception_support = 2; /* everything worked so far */ |
| 2782 | hp_cxx_exception_support_initialized = 1; |
| 2783 | exception_support_initialized = 1; |
| 2784 | |
| 2785 | return 1; |
| 2786 | } |
| 2787 | |
| 2788 | /* Target operation for enabling or disabling interception of |
| 2789 | exception events. |
| 2790 | KIND is either EX_EVENT_THROW or EX_EVENT_CATCH |
| 2791 | ENABLE is either 0 (disable) or 1 (enable). |
| 2792 | Return value is NULL if no support found; |
| 2793 | -1 if something went wrong, |
| 2794 | or a pointer to a symtab/line struct if the breakpointable |
| 2795 | address was found. */ |
| 2796 | |
struct symtab_and_line *
child_enable_exception_callback (enum exception_event_kind kind, int enable)
{
  char buf[4];

  /* Lazily locate the aCC exception hooks on first use.  */
  if (!exception_support_initialized || !hp_cxx_exception_support_initialized)
    if (!initialize_hp_cxx_exception_support ())
      return NULL;

  switch (hp_cxx_exception_support)
    {
    case 0:
      /* Assuming no HP support at all */
      return NULL;
    case 1:
      /* HP support should be present, but something went wrong */
      return (struct symtab_and_line *) -1;	/* yuck! */
      /* there may be other cases in the future */
    }

  /* Set the EH hook to point to the callback routine */
  store_unsigned_integer (buf, 4, enable ? eh_notify_callback_addr : 0);	/* FIXME 32x64 problem */
  /* pai: (temp) FIXME should there be a pack operation first? */
  if (target_write_memory (eh_notify_hook_addr, buf, 4))	/* FIXME 32x64 problem */
    {
      warning ("Could not write to target memory for exception event callback.");
      warning ("Interception of exception events may not work.");
      return (struct symtab_and_line *) -1;
    }
  if (enable)
    {
      /* Ensure that __d_pid is set up correctly -- end.c code checks this. :-( */
      if (PIDGET (inferior_ptid) > 0)
	{
	  if (setup_d_pid_in_inferior ())
	    return (struct symtab_and_line *) -1;
	}
      else
	{
	  warning ("Internal error: Invalid inferior pid?  Cannot intercept exception events.");
	  return (struct symtab_and_line *) -1;
	}
    }

  /* Flip the per-kind enable flag (in end.o) on or off.  */
  switch (kind)
    {
    case EX_EVENT_THROW:
      store_unsigned_integer (buf, 4, enable ? 1 : 0);
      if (target_write_memory (eh_catch_throw_addr, buf, 4))	/* FIXME 32x64? */
	{
	  warning ("Couldn't enable exception throw interception.");
	  return (struct symtab_and_line *) -1;
	}
      break;
    case EX_EVENT_CATCH:
      store_unsigned_integer (buf, 4, enable ? 1 : 0);
      if (target_write_memory (eh_catch_catch_addr, buf, 4))	/* FIXME 32x64? */
	{
	  warning ("Couldn't enable exception catch interception.");
	  return (struct symtab_and_line *) -1;
	}
      break;
    default:
      error ("Request to enable unknown or unsupported exception event.");
    }

  /* Copy break address into new sal struct, malloc'ing if needed.
     The struct is allocated once and reused on later calls.  */
  if (!break_callback_sal)
    {
      break_callback_sal = (struct symtab_and_line *) xmalloc (sizeof (struct symtab_and_line));
    }
  init_sal (break_callback_sal);
  break_callback_sal->symtab = NULL;
  break_callback_sal->pc = eh_break_addr;
  break_callback_sal->line = 0;
  break_callback_sal->end = eh_break_addr;

  return break_callback_sal;
}
| 2876 | |
/* Record some information about the current exception event;
   filled in by child_get_current_exception_event below.  */
static struct exception_event_record current_ex_event;
/* Convenience struct: an all-zero symtab_and_line value.  */
static struct symtab_and_line null_symtab_and_line =
{NULL, 0, 0, 0};
| 2882 | |
| 2883 | /* Report current exception event. Returns a pointer to a record |
| 2884 | that describes the kind of the event, where it was thrown from, |
| 2885 | and where it will be caught. More information may be reported |
| 2886 | in the future */ |
struct exception_event_record *
child_get_current_exception_event (void)
{
  CORE_ADDR event_kind;
  CORE_ADDR throw_addr;
  CORE_ADDR catch_addr;
  struct frame_info *fi, *curr_frame;
  int level = 1;

  curr_frame = get_current_frame ();
  if (!curr_frame)
    return (struct exception_event_record *) NULL;

  /* Go up one frame to __d_eh_notify_callback, because at the
     point when this code is executed, there's garbage in the
     arguments of __d_eh_break. */
  fi = find_relative_frame (curr_frame, &level);
  if (level != 0)
    return (struct exception_event_record *) NULL;

  select_frame (fi);

  /* Read in the arguments */
  /* __d_eh_notify_callback() is called with 3 arguments:
     1. event kind catch or throw
     2. the target address if known
     3. a flag -- not sure what this is. pai/1997-07-17 */
  event_kind = read_register (ARG0_REGNUM);
  catch_addr = read_register (ARG1_REGNUM);

  /* Now go down to a user frame */
  /* For a throw, __d_eh_break is called by
     __d_eh_notify_callback which is called by
     __notify_throw which is called
     from user code.
     For a catch, __d_eh_break is called by
     __d_eh_notify_callback which is called by
     <stackwalking stuff> which is called by
     __throw__<stuff> or __rethrow_<stuff> which is called
     from user code. */
  /* FIXME: Don't use such magic numbers; search for the frames */
  level = (event_kind == EX_EVENT_THROW) ? 3 : 4;
  fi = find_relative_frame (curr_frame, &level);
  if (level != 0)
    return (struct exception_event_record *) NULL;

  select_frame (fi);
  throw_addr = get_frame_pc (fi);

  /* Go back to original (top) frame */
  select_frame (curr_frame);

  /* Translate the raw addresses into symtab/line info for the
     caller's benefit.  */
  current_ex_event.kind = (enum exception_event_kind) event_kind;
  current_ex_event.throw_sal = find_pc_line (throw_addr, 1);
  current_ex_event.catch_sal = find_pc_line (catch_addr, 1);

  return &current_ex_event;
}
| 2945 | |
| 2946 | /* Instead of this nasty cast, add a method pvoid() that prints out a |
| 2947 | host VOID data type (remember %p isn't portable). */ |
| 2948 | |
static CORE_ADDR
hppa_pointer_to_address_hack (void *ptr)
{
  /* Reinterpret a host pointer as a target address purely so it can
     be printed; only valid when host and target pointer sizes agree,
     which the assert checks.  */
  gdb_assert (sizeof (ptr) == TYPE_LENGTH (builtin_type_void_data_ptr));
  return POINTER_TO_ADDRESS (builtin_type_void_data_ptr, &ptr);
}
| 2955 | |
/* CLI command: print the unwind table entry covering the address
   given by expression EXP.  With no argument, silently do nothing.  */
static void
unwind_command (char *exp, int from_tty)
{
  CORE_ADDR address;
  struct unwind_table_entry *u;

  /* If we have an expression, evaluate it and use it as the address.  */

  if (exp != 0 && *exp != 0)
    address = parse_and_eval_address (exp);
  else
    return;

  u = find_unwind_entry (address);

  if (!u)
    {
      printf_unfiltered ("Can't find unwind table entry for %s\n", exp);
      return;
    }

  printf_unfiltered ("unwind_table_entry (0x%s):\n",
		     paddr_nz (hppa_pointer_to_address_hack (u)));

  printf_unfiltered ("\tregion_start = ");
  print_address (u->region_start, gdb_stdout);

  printf_unfiltered ("\n\tregion_end = ");
  print_address (u->region_end, gdb_stdout);

/* Print the name of each unwind flag that is set in the entry.  */
#define pif(FLD) if (u->FLD) printf_unfiltered (" "#FLD);

  printf_unfiltered ("\n\tflags =");
  pif (Cannot_unwind);
  pif (Millicode);
  pif (Millicode_save_sr0);
  pif (Entry_SR);
  pif (Args_stored);
  pif (Variable_Frame);
  pif (Separate_Package_Body);
  pif (Frame_Extension_Millicode);
  pif (Stack_Overflow_Check);
  pif (Two_Instruction_SP_Increment);
  pif (Ada_Region);
  pif (Save_SP);
  pif (Save_RP);
  pif (Save_MRP_in_frame);
  pif (extn_ptr_defined);
  pif (Cleanup_defined);
  pif (MPE_XL_interrupt_marker);
  pif (HP_UX_interrupt_marker);
  pif (Large_frame);

  putchar_unfiltered ('\n');

/* Print a numeric unwind field as hex.  */
#define pin(FLD) printf_unfiltered ("\t"#FLD" = 0x%x\n", u->FLD);

  pin (Region_description);
  pin (Entry_FR);
  pin (Entry_GR);
  pin (Total_frame_size);
}
| 3018 | |
| 3019 | void |
| 3020 | hppa_skip_permanent_breakpoint (void) |
| 3021 | { |
| 3022 | /* To step over a breakpoint instruction on the PA takes some |
| 3023 | fiddling with the instruction address queue. |
| 3024 | |
| 3025 | When we stop at a breakpoint, the IA queue front (the instruction |
| 3026 | we're executing now) points at the breakpoint instruction, and |
| 3027 | the IA queue back (the next instruction to execute) points to |
| 3028 | whatever instruction we would execute after the breakpoint, if it |
| 3029 | were an ordinary instruction. This is the case even if the |
| 3030 | breakpoint is in the delay slot of a branch instruction. |
| 3031 | |
| 3032 | Clearly, to step past the breakpoint, we need to set the queue |
| 3033 | front to the back. But what do we put in the back? What |
| 3034 | instruction comes after that one? Because of the branch delay |
| 3035 | slot, the next insn is always at the back + 4. */ |
| 3036 | write_register (PCOQ_HEAD_REGNUM, read_register (PCOQ_TAIL_REGNUM)); |
| 3037 | write_register (PCSQ_HEAD_REGNUM, read_register (PCSQ_TAIL_REGNUM)); |
| 3038 | |
| 3039 | write_register (PCOQ_TAIL_REGNUM, read_register (PCOQ_TAIL_REGNUM) + 4); |
| 3040 | /* We can leave the tail's space the same, since there's no jump. */ |
| 3041 | } |
| 3042 | |
int
hppa_reg_struct_has_addr (int gcc_p, struct type *type)
{
  /* On the PA, any pass-by-value structure larger than 8 bytes is in
     fact passed via a pointer, no matter its type or which compiler
     was used -- so GCC_P plays no part in the decision.  */
  if (TYPE_LENGTH (type) > 8)
    return 1;
  return 0;
}
| 3050 | |
| 3051 | int |
| 3052 | hppa_inner_than (CORE_ADDR lhs, CORE_ADDR rhs) |
| 3053 | { |
| 3054 | /* Stack grows upward */ |
| 3055 | return (lhs > rhs); |
| 3056 | } |
| 3057 | |
/* Return non-zero if PC is not a usable breakpoint address until the
   inferior has been run (i.e. until we have a stack).  */
int
hppa_pc_requires_run_before_use (CORE_ADDR pc)
{
  /* Sometimes we may pluck out a minimal symbol that has a negative address.

     An example of this occurs when an a.out is linked against a foo.sl.
     The foo.sl defines a global bar(), and the a.out declares a signature
     for bar().  However, the a.out doesn't directly call bar(), but passes
     its address in another call.

     If you have this scenario and attempt to "break bar" before running,
     gdb will find a minimal symbol for bar() in the a.out.  But that
     symbol's address will be negative.  What this appears to denote is
     an index backwards from the base of the procedure linkage table (PLT)
     into the data linkage table (DLT), the end of which is contiguous
     with the start of the PLT.  This is clearly not a valid address for
     us to set a breakpoint on.

     Note that one must be careful in how one checks for a negative address.
     0xc0000000 is a legitimate address of something in a shared text
     segment, for example.  Since I don't know what the possible range
     is of these "really, truly negative" addresses that come from the
     minimal symbols, I'm resorting to the gross hack of checking the
     top byte of the address for all 1's.  Sigh.  */

  return (!target_has_stack && (pc & 0xFF000000));
}
| 3085 | |
| 3086 | int |
| 3087 | hppa_instruction_nullified (void) |
| 3088 | { |
| 3089 | /* brobecker 2002/11/07: Couldn't we use a ULONGEST here? It would |
| 3090 | avoid the type cast. I'm leaving it as is for now as I'm doing |
| 3091 | semi-mechanical multiarching-related changes. */ |
| 3092 | const int ipsw = (int) read_register (IPSW_REGNUM); |
| 3093 | const int flags = (int) read_register (FLAGS_REGNUM); |
| 3094 | |
| 3095 | return ((ipsw & 0x00200000) && !(flags & 0x2)); |
| 3096 | } |
| 3097 | |
| 3098 | /* Return the GDB type object for the "standard" data type of data |
| 3099 | in register N. */ |
| 3100 | |
| 3101 | static struct type * |
| 3102 | hppa32_register_type (struct gdbarch *gdbarch, int reg_nr) |
| 3103 | { |
| 3104 | if (reg_nr < FP4_REGNUM) |
| 3105 | return builtin_type_uint32; |
| 3106 | else |
| 3107 | return builtin_type_ieee_single_big; |
| 3108 | } |
| 3109 | |
| 3110 | /* Return the GDB type object for the "standard" data type of data |
| 3111 | in register N. hppa64 version. */ |
| 3112 | |
| 3113 | static struct type * |
| 3114 | hppa64_register_type (struct gdbarch *gdbarch, int reg_nr) |
| 3115 | { |
| 3116 | if (reg_nr < FP4_REGNUM) |
| 3117 | return builtin_type_uint64; |
| 3118 | else |
| 3119 | return builtin_type_ieee_double_big; |
| 3120 | } |
| 3121 | |
| 3122 | /* Return True if REGNUM is not a register available to the user |
| 3123 | through ptrace(). */ |
| 3124 | |
| 3125 | int |
| 3126 | hppa_cannot_store_register (int regnum) |
| 3127 | { |
| 3128 | return (regnum == 0 |
| 3129 | || regnum == PCSQ_HEAD_REGNUM |
| 3130 | || (regnum >= PCSQ_TAIL_REGNUM && regnum < IPSW_REGNUM) |
| 3131 | || (regnum > IPSW_REGNUM && regnum < FP4_REGNUM)); |
| 3132 | |
| 3133 | } |
| 3134 | |
| 3135 | CORE_ADDR |
| 3136 | hppa_smash_text_address (CORE_ADDR addr) |
| 3137 | { |
| 3138 | /* The low two bits of the PC on the PA contain the privilege level. |
| 3139 | Some genius implementing a (non-GCC) compiler apparently decided |
| 3140 | this means that "addresses" in a text section therefore include a |
| 3141 | privilege level, and thus symbol tables should contain these bits. |
| 3142 | This seems like a bonehead thing to do--anyway, it seems to work |
| 3143 | for our purposes to just ignore those bits. */ |
| 3144 | |
| 3145 | return (addr &= ~0x3); |
| 3146 | } |
| 3147 | |
| 3148 | /* Get the ith function argument for the current function. */ |
| 3149 | CORE_ADDR |
| 3150 | hppa_fetch_pointer_argument (struct frame_info *frame, int argi, |
| 3151 | struct type *type) |
| 3152 | { |
| 3153 | CORE_ADDR addr; |
| 3154 | get_frame_register (frame, R0_REGNUM + 26 - argi, &addr); |
| 3155 | return addr; |
| 3156 | } |
| 3157 | |
| 3158 | /* Here is a table of C type sizes on hppa with various compiles |
| 3159 | and options. I measured this on PA 9000/800 with HP-UX 11.11 |
| 3160 | and these compilers: |
| 3161 | |
| 3162 | /usr/ccs/bin/cc HP92453-01 A.11.01.21 |
| 3163 | /opt/ansic/bin/cc HP92453-01 B.11.11.28706.GP |
| 3164 | /opt/aCC/bin/aCC B3910B A.03.45 |
| 3165 | gcc gcc 3.3.2 native hppa2.0w-hp-hpux11.11 |
| 3166 | |
| 3167 | cc : 1 2 4 4 8 : 4 8 -- : 4 4 |
| 3168 | ansic +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4 |
| 3169 | ansic +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4 |
| 3170 | ansic +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8 |
| 3171 | acc +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4 |
| 3172 | acc +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4 |
| 3173 | acc +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8 |
| 3174 | gcc : 1 2 4 4 8 : 4 8 16 : 4 4 |
| 3175 | |
| 3176 | Each line is: |
| 3177 | |
| 3178 | compiler and options |
| 3179 | char, short, int, long, long long |
| 3180 | float, double, long double |
| 3181 | char *, void (*)() |
| 3182 | |
| 3183 | So all these compilers use either ILP32 or LP64 model. |
| 3184 | TODO: gcc has more options so it needs more investigation. |
| 3185 | |
| 3186 | For floating point types, see: |
| 3187 | |
| 3188 | http://docs.hp.com/hpux/pdf/B3906-90006.pdf |
| 3189 | HP-UX floating-point guide, hpux 11.00 |
| 3190 | |
| 3191 | -- chastain 2003-12-18 */ |
| 3192 | |
| 3193 | static struct gdbarch * |
| 3194 | hppa_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) |
| 3195 | { |
| 3196 | struct gdbarch_tdep *tdep; |
| 3197 | struct gdbarch *gdbarch; |
| 3198 | |
| 3199 | /* Try to determine the ABI of the object we are loading. */ |
| 3200 | if (info.abfd != NULL && info.osabi == GDB_OSABI_UNKNOWN) |
| 3201 | { |
| 3202 | /* If it's a SOM file, assume it's HP/UX SOM. */ |
| 3203 | if (bfd_get_flavour (info.abfd) == bfd_target_som_flavour) |
| 3204 | info.osabi = GDB_OSABI_HPUX_SOM; |
| 3205 | } |
| 3206 | |
| 3207 | /* find a candidate among the list of pre-declared architectures. */ |
| 3208 | arches = gdbarch_list_lookup_by_info (arches, &info); |
| 3209 | if (arches != NULL) |
| 3210 | return (arches->gdbarch); |
| 3211 | |
| 3212 | /* If none found, then allocate and initialize one. */ |
| 3213 | tdep = XMALLOC (struct gdbarch_tdep); |
| 3214 | gdbarch = gdbarch_alloc (&info, tdep); |
| 3215 | |
| 3216 | /* Determine from the bfd_arch_info structure if we are dealing with |
| 3217 | a 32 or 64 bits architecture. If the bfd_arch_info is not available, |
| 3218 | then default to a 32bit machine. */ |
| 3219 | if (info.bfd_arch_info != NULL) |
| 3220 | tdep->bytes_per_address = |
| 3221 | info.bfd_arch_info->bits_per_address / info.bfd_arch_info->bits_per_byte; |
| 3222 | else |
| 3223 | tdep->bytes_per_address = 4; |
| 3224 | |
| 3225 | /* Some parts of the gdbarch vector depend on whether we are running |
| 3226 | on a 32 bits or 64 bits target. */ |
| 3227 | switch (tdep->bytes_per_address) |
| 3228 | { |
| 3229 | case 4: |
| 3230 | set_gdbarch_num_regs (gdbarch, hppa32_num_regs); |
| 3231 | set_gdbarch_register_name (gdbarch, hppa32_register_name); |
| 3232 | set_gdbarch_register_type (gdbarch, hppa32_register_type); |
| 3233 | break; |
| 3234 | case 8: |
| 3235 | set_gdbarch_num_regs (gdbarch, hppa64_num_regs); |
| 3236 | set_gdbarch_register_name (gdbarch, hppa64_register_name); |
| 3237 | set_gdbarch_register_type (gdbarch, hppa64_register_type); |
| 3238 | break; |
| 3239 | default: |
| 3240 | internal_error (__FILE__, __LINE__, "Unsupported address size: %d", |
| 3241 | tdep->bytes_per_address); |
| 3242 | } |
| 3243 | |
| 3244 | set_gdbarch_long_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT); |
| 3245 | set_gdbarch_ptr_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT); |
| 3246 | |
| 3247 | /* The following gdbarch vector elements are the same in both ILP32 |
| 3248 | and LP64, but might show differences some day. */ |
| 3249 | set_gdbarch_long_long_bit (gdbarch, 64); |
| 3250 | set_gdbarch_long_double_bit (gdbarch, 128); |
| 3251 | set_gdbarch_long_double_format (gdbarch, &floatformat_ia64_quad_big); |
| 3252 | |
| 3253 | /* The following gdbarch vector elements do not depend on the address |
| 3254 | size, or in any other gdbarch element previously set. */ |
| 3255 | set_gdbarch_skip_prologue (gdbarch, hppa_skip_prologue); |
| 3256 | set_gdbarch_skip_trampoline_code (gdbarch, hppa_skip_trampoline_code); |
| 3257 | set_gdbarch_in_solib_call_trampoline (gdbarch, hppa_in_solib_call_trampoline); |
| 3258 | set_gdbarch_in_solib_return_trampoline (gdbarch, |
| 3259 | hppa_in_solib_return_trampoline); |
| 3260 | set_gdbarch_inner_than (gdbarch, hppa_inner_than); |
| 3261 | set_gdbarch_sp_regnum (gdbarch, HPPA_SP_REGNUM); |
| 3262 | set_gdbarch_fp0_regnum (gdbarch, HPPA_FP0_REGNUM); |
| 3263 | set_gdbarch_cannot_store_register (gdbarch, hppa_cannot_store_register); |
| 3264 | set_gdbarch_addr_bits_remove (gdbarch, hppa_smash_text_address); |
| 3265 | set_gdbarch_smash_text_address (gdbarch, hppa_smash_text_address); |
| 3266 | set_gdbarch_believe_pcc_promotion (gdbarch, 1); |
| 3267 | set_gdbarch_read_pc (gdbarch, hppa_target_read_pc); |
| 3268 | set_gdbarch_write_pc (gdbarch, hppa_target_write_pc); |
| 3269 | |
| 3270 | /* Helper for function argument information. */ |
| 3271 | set_gdbarch_fetch_pointer_argument (gdbarch, hppa_fetch_pointer_argument); |
| 3272 | |
| 3273 | set_gdbarch_print_insn (gdbarch, print_insn_hppa); |
| 3274 | |
| 3275 | /* When a hardware watchpoint triggers, we'll move the inferior past |
| 3276 | it by removing all eventpoints; stepping past the instruction |
| 3277 | that caused the trigger; reinserting eventpoints; and checking |
| 3278 | whether any watched location changed. */ |
| 3279 | set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1); |
| 3280 | |
| 3281 | /* Inferior function call methods. */ |
| 3282 | switch (tdep->bytes_per_address) |
| 3283 | { |
| 3284 | case 4: |
| 3285 | set_gdbarch_push_dummy_call (gdbarch, hppa32_push_dummy_call); |
| 3286 | set_gdbarch_frame_align (gdbarch, hppa32_frame_align); |
| 3287 | break; |
| 3288 | case 8: |
| 3289 | set_gdbarch_push_dummy_call (gdbarch, hppa64_push_dummy_call); |
| 3290 | set_gdbarch_frame_align (gdbarch, hppa64_frame_align); |
| 3291 | break; |
| 3292 | default: |
| 3293 | internal_error (__FILE__, __LINE__, "bad switch"); |
| 3294 | } |
| 3295 | |
| 3296 | /* Struct return methods. */ |
| 3297 | switch (tdep->bytes_per_address) |
| 3298 | { |
| 3299 | case 4: |
| 3300 | set_gdbarch_return_value (gdbarch, hppa32_return_value); |
| 3301 | break; |
| 3302 | case 8: |
| 3303 | set_gdbarch_return_value (gdbarch, hppa64_return_value); |
| 3304 | break; |
| 3305 | default: |
| 3306 | internal_error (__FILE__, __LINE__, "bad switch"); |
| 3307 | } |
| 3308 | |
| 3309 | /* Frame unwind methods. */ |
| 3310 | set_gdbarch_unwind_dummy_id (gdbarch, hppa_unwind_dummy_id); |
| 3311 | set_gdbarch_unwind_pc (gdbarch, hppa_unwind_pc); |
| 3312 | frame_unwind_append_sniffer (gdbarch, hppa_frame_unwind_sniffer); |
| 3313 | frame_base_append_sniffer (gdbarch, hppa_frame_base_sniffer); |
| 3314 | |
| 3315 | /* Hook in ABI-specific overrides, if they have been registered. */ |
| 3316 | gdbarch_init_osabi (info, gdbarch); |
| 3317 | |
| 3318 | return gdbarch; |
| 3319 | } |
| 3320 | |
/* "dump_tdep" hook for the hppa target, invoked by "maintenance print
   architecture".  There is currently no HP PA specific tdep state worth
   printing, so this is intentionally a no-op placeholder.  */
static void
hppa_dump_tdep (struct gdbarch *current_gdbarch, struct ui_file *file)
{
  /* Intentionally empty: nothing to print for the moment.  */
}
| 3326 | |
/* Module initializer: register the hppa architecture with the gdbarch
   framework and install the HP PA specific user commands (the
   "maintenance print unwind" subcommand and the deprecated
   xdb-compatibility breakpoint commands).  Called automatically by
   GDB's init machinery at startup.  */
void
_initialize_hppa_tdep (void)
{
  struct cmd_list_element *c;
  /* Block-scope declarations for breakpoint commands defined elsewhere;
     declaring them here keeps them out of the file's global namespace.  */
  void break_at_finish_command (char *arg, int from_tty);
  void tbreak_at_finish_command (char *arg, int from_tty);
  void break_at_finish_at_depth_command (char *arg, int from_tty);

  /* Hook hppa_gdbarch_init / hppa_dump_tdep into the gdbarch framework.  */
  gdbarch_register (bfd_arch_hppa, hppa_gdbarch_init, hppa_dump_tdep);

  /* "maintenance print unwind ADDR" -- dump a raw unwind table entry.  */
  add_cmd ("unwind", class_maintenance, unwind_command,
	   "Print unwind table entry at given address.",
	   &maintenanceprintlist);

  /* Deprecated xdb-style "break at procedure exit" command.  The help
     text is built with concat because of its length.  */
  deprecate_cmd (add_com ("xbreak", class_breakpoint,
			  break_at_finish_command,
			  concat ("Set breakpoint at procedure exit. \n\
Argument may be function name, or \"*\" and an address.\n\
If function is specified, break at end of code for that function.\n\
If an address is specified, break at the end of the function that contains \n\
that exact address.\n",
		   "With no arg, uses current execution address of selected stack frame.\n\
This is useful for breaking on return to a stack frame.\n\
\n\
Multiple breakpoints at one place are permitted, and useful if conditional.\n\
\n\
Do \"help breakpoints\" for info on other commands dealing with breakpoints.", NULL)), NULL);
  /* Abbreviation aliases for "xbreak"; all equally deprecated.  */
  deprecate_cmd (add_com_alias ("xb", "xbreak", class_breakpoint, 1), NULL);
  deprecate_cmd (add_com_alias ("xbr", "xbreak", class_breakpoint, 1), NULL);
  deprecate_cmd (add_com_alias ("xbre", "xbreak", class_breakpoint, 1), NULL);
  deprecate_cmd (add_com_alias ("xbrea", "xbreak", class_breakpoint, 1), NULL);

  /* Deprecated temporary variant of "xbreak"; keep the command element
     in C so a location completer can be attached below.  */
  deprecate_cmd (c = add_com ("txbreak", class_breakpoint,
			      tbreak_at_finish_command,
"Set temporary breakpoint at procedure exit.  Either there should\n\
be no argument or the argument must be a depth.\n"), NULL);
  set_cmd_completer (c, location_completer);

  /* "bx" is only registered when xdb compatibility mode is enabled.  */
  if (xdb_commands)
    deprecate_cmd (add_com ("bx", class_breakpoint,
			    break_at_finish_at_depth_command,
"Set breakpoint at procedure exit.  Either there should\n\
be no argument or the argument must be a depth.\n"), NULL);
}
| 3371 | |