1 /* Target-dependent code for the HP PA architecture, for GDB.
2
3 Copyright 1986, 1987, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
4 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software
5 Foundation, Inc.
6
7 Contributed by the Center for Software Science at the
8 University of Utah (pa-gdb-bugs@cs.utah.edu).
9
10 This file is part of GDB.
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
26
27 #include "defs.h"
28 #include "frame.h"
29 #include "bfd.h"
30 #include "inferior.h"
31 #include "value.h"
32 #include "regcache.h"
33 #include "completer.h"
34 #include "language.h"
35 #include "osabi.h"
36 #include "gdb_assert.h"
37 #include "infttrace.h"
38 #include "arch-utils.h"
39 /* For argument passing to the inferior */
40 #include "symtab.h"
41 #include "infcall.h"
42 #include "dis-asm.h"
43 #include "trad-frame.h"
44 #include "frame-unwind.h"
45 #include "frame-base.h"
46
47 #include "gdb_stat.h"
48 #include "gdb_wait.h"
49
50 #include "gdbcore.h"
51 #include "gdbcmd.h"
52 #include "target.h"
53 #include "symfile.h"
54 #include "objfiles.h"
55 #include "hppa-tdep.h"
56
57 static int hppa_debug = 0;
58
59 /* Some local constants. */
60 static const int hppa32_num_regs = 128;
61 static const int hppa64_num_regs = 96;
62
63 /* hppa-specific object data -- unwind and solib info.
64 TODO/maybe: think about splitting this into two parts; the unwind data is
65 common to all hppa targets, but is only used in this file; we can register
66 that separately and make this static. The solib data is probably hpux-
67 specific, so we can create a separate extern objfile_data that is registered
68 by hppa-hpux-tdep.c and shared with pa64solib.c and somsolib.c. */
69 const struct objfile_data *hppa_objfile_priv_data = NULL;
70
71 /* Get at various relevant fields of an instruction word. */
72 #define MASK_5 0x1f
73 #define MASK_11 0x7ff
74 #define MASK_14 0x3fff
75 #define MASK_21 0x1fffff
76
77 /* Define offsets into the call dummy for the _sr4export address.
78 See comments related to CALL_DUMMY for more info. */
79 #define SR4EXPORT_LDIL_OFFSET (HPPA_INSTRUCTION_SIZE * 12)
80 #define SR4EXPORT_LDO_OFFSET (HPPA_INSTRUCTION_SIZE * 13)
81
82 /* To support detection of the pseudo-initial frame
83 that threads have. */
84 #define THREAD_INITIAL_FRAME_SYMBOL "__pthread_exit"
85 #define THREAD_INITIAL_FRAME_SYM_LEN sizeof(THREAD_INITIAL_FRAME_SYMBOL)
86
87 /* Sizes (in bytes) of the native unwind entries. */
88 #define UNWIND_ENTRY_SIZE 16
89 #define STUB_UNWIND_ENTRY_SIZE 8
90
91 static int get_field (unsigned word, int from, int to);
92
93 static int extract_5_load (unsigned int);
94
95 static unsigned extract_5R_store (unsigned int);
96
97 static unsigned extract_5r_store (unsigned int);
98
99 struct unwind_table_entry *find_unwind_entry (CORE_ADDR);
100
101 static int extract_17 (unsigned int);
102
103 static int extract_21 (unsigned);
104
105 static int extract_14 (unsigned);
106
107 static void unwind_command (char *, int);
108
109 static int low_sign_extend (unsigned int, unsigned int);
110
111 static int sign_extend (unsigned int, unsigned int);
112
113 static int hppa_alignof (struct type *);
114
115 static int prologue_inst_adjust_sp (unsigned long);
116
117 static int is_branch (unsigned long);
118
119 static int inst_saves_gr (unsigned long);
120
121 static int inst_saves_fr (unsigned long);
122
123 static int compare_unwind_entries (const void *, const void *);
124
125 static void read_unwind_info (struct objfile *);
126
127 static void internalize_unwinds (struct objfile *,
128 struct unwind_table_entry *,
129 asection *, unsigned int,
130 unsigned int, CORE_ADDR);
131 static void record_text_segment_lowaddr (bfd *, asection *, void *);
132 /* FIXME: brobecker 2002-11-07: We will likely be able to make the
133    following functions static, once hppa is partially multiarched. */
134 int hppa_pc_requires_run_before_use (CORE_ADDR pc);
135 int hppa_instruction_nullified (void);
136
137 /* Handle 32/64-bit struct return conventions. */
138
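/* In the 32-bit scheme implemented below, float results come back in
   fr4, while integer and small aggregate results live right-aligned in
   gr28 (and gr29 for the second word); anything wider than two words
   falls back to the struct convention.  */
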
139 static enum return_value_convention
140 hppa32_return_value (struct gdbarch *gdbarch,
141 struct type *type, struct regcache *regcache,
142 void *readbuf, const void *writebuf)
143 {
144 if (TYPE_CODE (type) == TYPE_CODE_FLT)
145 {
146 if (readbuf != NULL)
147 regcache_cooked_read_part (regcache, FP4_REGNUM, 0,
148 TYPE_LENGTH (type), readbuf);
149 if (writebuf != NULL)
150 regcache_cooked_write_part (regcache, FP4_REGNUM, 0,
151 TYPE_LENGTH (type), writebuf);
152 return RETURN_VALUE_REGISTER_CONVENTION;
153 }
154 if (TYPE_LENGTH (type) <= 2 * 4)
155 {
156 /* The value always lives in the right hand end of the register
157 (or register pair)? */
158 int b;
159 int reg = 28;
160 int part = TYPE_LENGTH (type) % 4;
161 /* The left hand register contains only part of the value,
162 transfer that first so that the rest can be xfered as entire
163 4-byte registers. */
164 if (part > 0)
165 {
166 if (readbuf != NULL)
167 regcache_cooked_read_part (regcache, reg, 4 - part,
168 part, readbuf);
169 if (writebuf != NULL)
170 regcache_cooked_write_part (regcache, reg, 4 - part,
171 part, writebuf);
172 reg++;
173 }
174 /* Now transfer the remaining register values. */
175 for (b = part; b < TYPE_LENGTH (type); b += 4)
176 {
177 if (readbuf != NULL)
178 regcache_cooked_read (regcache, reg, (char *) readbuf + b);
179 if (writebuf != NULL)
180 regcache_cooked_write (regcache, reg, (const char *) writebuf + b);
181 reg++;
182 }
183 return RETURN_VALUE_REGISTER_CONVENTION;
184 }
185 else
186 return RETURN_VALUE_STRUCT_CONVENTION;
187 }
188
189 static enum return_value_convention
190 hppa64_return_value (struct gdbarch *gdbarch,
191 struct type *type, struct regcache *regcache,
192 void *readbuf, const void *writebuf)
193 {
194 /* RM: Floats are returned in FR4R, doubles in FR4. Integral values
195    are in r28, padded on the left. Aggregates less than 65 bits are
196    in r28, right padded. Aggregates up to 128 bits are in r28 and
197 r29, right padded. */
198 if (TYPE_CODE (type) == TYPE_CODE_FLT
199 && TYPE_LENGTH (type) <= 8)
200 {
201 /* Floats are right aligned? */
202 int offset = register_size (gdbarch, FP4_REGNUM) - TYPE_LENGTH (type);
203 if (readbuf != NULL)
204 regcache_cooked_read_part (regcache, FP4_REGNUM, offset,
205 TYPE_LENGTH (type), readbuf);
206 if (writebuf != NULL)
207 regcache_cooked_write_part (regcache, FP4_REGNUM, offset,
208 TYPE_LENGTH (type), writebuf);
209 return RETURN_VALUE_REGISTER_CONVENTION;
210 }
211 else if (TYPE_LENGTH (type) <= 8 && is_integral_type (type))
212 {
213 /* Integrals are right aligned. */
214 int offset = register_size (gdbarch, FP4_REGNUM) - TYPE_LENGTH (type);
215 if (readbuf != NULL)
216 regcache_cooked_read_part (regcache, 28, offset,
217 TYPE_LENGTH (type), readbuf);
218 if (writebuf != NULL)
219 regcache_cooked_write_part (regcache, 28, offset,
220 TYPE_LENGTH (type), writebuf);
221 return RETURN_VALUE_REGISTER_CONVENTION;
222 }
223 else if (TYPE_LENGTH (type) <= 2 * 8)
224 {
225 /* Composite values are left aligned. */
226 int b;
227 for (b = 0; b < TYPE_LENGTH (type); b += 8)
228 {
229 int part = min (8, TYPE_LENGTH (type) - b);
230 if (readbuf != NULL)
231 regcache_cooked_read_part (regcache, 28 + b / 8, 0, part,
232 (char *) readbuf + b);
233 if (writebuf != NULL)
234 regcache_cooked_write_part (regcache, 28 + b / 8, 0, part,
235 (const char *) writebuf + b);
236 }
237 return RETURN_VALUE_REGISTER_CONVENTION;
238 }
239 else
240 return RETURN_VALUE_STRUCT_CONVENTION;
241 }
242
243 /* Routines to extract various sized constants out of hppa
244 instructions. */
245
246 /* This assumes that no garbage lies outside of the lower bits of
247 value. */
248
249 static int
250 sign_extend (unsigned val, unsigned bits)
251 {
252 return (int) (val >> (bits - 1) ? (-1 << bits) | val : val);
253 }
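
/* For example, sign_extend (0x1f, 5) yields -1 and sign_extend (0x0f, 5)
   yields 15, since bit 4 is the sign bit of a 5-bit field.  */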
254
255 /* For many immediate values the sign bit is the low bit! */
256
257 static int
258 low_sign_extend (unsigned val, unsigned bits)
259 {
260 return (int) ((val & 0x1 ? (-1 << (bits - 1)) : 0) | val >> 1);
261 }
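
/* For example, low_sign_extend (0x1f, 5) is -1 (low bit set means
   negative), while low_sign_extend (0x1e, 5) is 15.  */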
262
263 /* Extract the bits at positions between FROM and TO, using HP's numbering
264 (MSB = 0). */
265
266 static int
267 get_field (unsigned word, int from, int to)
268 {
269 return ((word) >> (31 - (to)) & ((1 << ((to) - (from) + 1)) - 1));
270 }
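
/* With HP's numbering bit 0 is the most significant bit, so for
   example get_field (0xdeadbeef, 0, 4) is 0x1b (the top five bits).  */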
271
272 /* extract the immediate field from a ld{bhw}s instruction */
273
274 static int
275 extract_5_load (unsigned word)
276 {
277 return low_sign_extend (word >> 16 & MASK_5, 5);
278 }
279
280 /* extract the immediate field from a break instruction */
281
282 static unsigned
283 extract_5r_store (unsigned word)
284 {
285 return (word & MASK_5);
286 }
287
288 /* extract the immediate field from a {sr}sm instruction */
289
290 static unsigned
291 extract_5R_store (unsigned word)
292 {
293 return (word >> 16 & MASK_5);
294 }
295
296 /* extract a 14 bit immediate field */
297
298 static int
299 extract_14 (unsigned word)
300 {
301 return low_sign_extend (word & MASK_14, 14);
302 }
303
304 /* extract a 21 bit constant */
305
306 static int
307 extract_21 (unsigned word)
308 {
309 int val;
310
311 word &= MASK_21;
312 word <<= 11;
313 val = get_field (word, 20, 20);
314 val <<= 11;
315 val |= get_field (word, 9, 19);
316 val <<= 2;
317 val |= get_field (word, 5, 6);
318 val <<= 5;
319 val |= get_field (word, 0, 4);
320 val <<= 2;
321 val |= get_field (word, 7, 8);
322 return sign_extend (val, 21) << 11;
323 }
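
/* The 21-bit immediate used by instructions such as ldil/addil is stored
   in a scrambled bit order within the instruction word; the shuffling
   above reassembles it and places it in the upper 21 bits of the result
   (hence the final shift by 11).  */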
324
325 /* extract a 17 bit constant from branch instructions, returning the
326 19 bit signed value. */
327
328 static int
329 extract_17 (unsigned word)
330 {
331 return sign_extend (get_field (word, 19, 28) |
332 get_field (word, 29, 29) << 10 |
333 get_field (word, 11, 15) << 11 |
334 (word & 0x1) << 16, 17) << 2;
335 }
336 \f
337
338 /* Compare the start address for two unwind entries returning 1 if
339 the first address is larger than the second, -1 if the second is
340 larger than the first, and zero if they are equal. */
341
342 static int
343 compare_unwind_entries (const void *arg1, const void *arg2)
344 {
345 const struct unwind_table_entry *a = arg1;
346 const struct unwind_table_entry *b = arg2;
347
348 if (a->region_start > b->region_start)
349 return 1;
350 else if (a->region_start < b->region_start)
351 return -1;
352 else
353 return 0;
354 }
355
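/* Callback for bfd_map_over_sections: record in *DATA (a CORE_ADDR)
   the lowest load address (vma - filepos) of any allocated, loaded,
   read-only section.  Used to find the start of the text segment.  */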
356 static void
357 record_text_segment_lowaddr (bfd *abfd, asection *section, void *data)
358 {
359 if ((section->flags & (SEC_ALLOC | SEC_LOAD | SEC_READONLY))
360 == (SEC_ALLOC | SEC_LOAD | SEC_READONLY))
361 {
362 bfd_vma value = section->vma - section->filepos;
363 CORE_ADDR *low_text_segment_address = (CORE_ADDR *)data;
364
365 if (value < *low_text_segment_address)
366 *low_text_segment_address = value;
367 }
368 }
369
370 static void
371 internalize_unwinds (struct objfile *objfile, struct unwind_table_entry *table,
372 asection *section, unsigned int entries, unsigned int size,
373 CORE_ADDR text_offset)
374 {
375 /* We will read the unwind entries into temporary memory, then
376 fill in the actual unwind table. */
377
378 if (size > 0)
379 {
380 unsigned long tmp;
381 unsigned i;
382 char *buf = alloca (size);
383 CORE_ADDR low_text_segment_address;
384
385       /* For ELF targets, the unwinds are supposed to
386 be segment relative offsets instead of absolute addresses.
387
388 Note that when loading a shared library (text_offset != 0) the
389 unwinds are already relative to the text_offset that will be
390 passed in. */
391 if (gdbarch_tdep (current_gdbarch)->is_elf && text_offset == 0)
392 {
393 low_text_segment_address = -1;
394
395 bfd_map_over_sections (objfile->obfd,
396 record_text_segment_lowaddr,
397 &low_text_segment_address);
398
399 text_offset = low_text_segment_address;
400 }
401
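      /* Each native unwind entry is UNWIND_ENTRY_SIZE (16) bytes: a
         32-bit region start, a 32-bit region end, and two 32-bit words
         of flag bits which are unpacked field by field below.  */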
402 bfd_get_section_contents (objfile->obfd, section, buf, 0, size);
403
404 /* Now internalize the information being careful to handle host/target
405 endian issues. */
406 for (i = 0; i < entries; i++)
407 {
408 table[i].region_start = bfd_get_32 (objfile->obfd,
409 (bfd_byte *) buf);
410 table[i].region_start += text_offset;
411 buf += 4;
412 table[i].region_end = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
413 table[i].region_end += text_offset;
414 buf += 4;
415 tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
416 buf += 4;
417 table[i].Cannot_unwind = (tmp >> 31) & 0x1;
418 table[i].Millicode = (tmp >> 30) & 0x1;
419 table[i].Millicode_save_sr0 = (tmp >> 29) & 0x1;
420 table[i].Region_description = (tmp >> 27) & 0x3;
421 table[i].reserved1 = (tmp >> 26) & 0x1;
422 table[i].Entry_SR = (tmp >> 25) & 0x1;
423 table[i].Entry_FR = (tmp >> 21) & 0xf;
424 table[i].Entry_GR = (tmp >> 16) & 0x1f;
425 table[i].Args_stored = (tmp >> 15) & 0x1;
426 table[i].Variable_Frame = (tmp >> 14) & 0x1;
427 table[i].Separate_Package_Body = (tmp >> 13) & 0x1;
428 table[i].Frame_Extension_Millicode = (tmp >> 12) & 0x1;
429 table[i].Stack_Overflow_Check = (tmp >> 11) & 0x1;
430 table[i].Two_Instruction_SP_Increment = (tmp >> 10) & 0x1;
431 table[i].Ada_Region = (tmp >> 9) & 0x1;
432 table[i].cxx_info = (tmp >> 8) & 0x1;
433 table[i].cxx_try_catch = (tmp >> 7) & 0x1;
434 table[i].sched_entry_seq = (tmp >> 6) & 0x1;
435 table[i].reserved2 = (tmp >> 5) & 0x1;
436 table[i].Save_SP = (tmp >> 4) & 0x1;
437 table[i].Save_RP = (tmp >> 3) & 0x1;
438 table[i].Save_MRP_in_frame = (tmp >> 2) & 0x1;
439 table[i].extn_ptr_defined = (tmp >> 1) & 0x1;
440 table[i].Cleanup_defined = tmp & 0x1;
441 tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
442 buf += 4;
443 table[i].MPE_XL_interrupt_marker = (tmp >> 31) & 0x1;
444 table[i].HP_UX_interrupt_marker = (tmp >> 30) & 0x1;
445 table[i].Large_frame = (tmp >> 29) & 0x1;
446 table[i].Pseudo_SP_Set = (tmp >> 28) & 0x1;
447 table[i].reserved4 = (tmp >> 27) & 0x1;
448 table[i].Total_frame_size = tmp & 0x7ffffff;
449
450 /* Stub unwinds are handled elsewhere. */
451 table[i].stub_unwind.stub_type = 0;
452 table[i].stub_unwind.padding = 0;
453 }
454 }
455 }
456
457 /* Read in the backtrace information stored in the `$UNWIND_START$' section of
458 the object file. This info is used mainly by find_unwind_entry() to find
459 out the stack frame size and frame pointer used by procedures. We put
460    everything on the objfile obstack so that it automatically
461 gets freed when the objfile is destroyed. */
462
463 static void
464 read_unwind_info (struct objfile *objfile)
465 {
466 asection *unwind_sec, *stub_unwind_sec;
467 unsigned unwind_size, stub_unwind_size, total_size;
468 unsigned index, unwind_entries;
469 unsigned stub_entries, total_entries;
470 CORE_ADDR text_offset;
471 struct hppa_unwind_info *ui;
472 struct hppa_objfile_private *obj_private;
473
474 text_offset = ANOFFSET (objfile->section_offsets, 0);
475 ui = (struct hppa_unwind_info *) obstack_alloc (&objfile->objfile_obstack,
476 sizeof (struct hppa_unwind_info));
477
478 ui->table = NULL;
479 ui->cache = NULL;
480 ui->last = -1;
481
482 /* For reasons unknown the HP PA64 tools generate multiple unwinder
483 sections in a single executable. So we just iterate over every
484      section in the BFD looking for unwinder sections instead of trying
485 to do a lookup with bfd_get_section_by_name.
486
487 First determine the total size of the unwind tables so that we
488 can allocate memory in a nice big hunk. */
489 total_entries = 0;
490 for (unwind_sec = objfile->obfd->sections;
491 unwind_sec;
492 unwind_sec = unwind_sec->next)
493 {
494 if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
495 || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
496 {
497 unwind_size = bfd_section_size (objfile->obfd, unwind_sec);
498 unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;
499
500 total_entries += unwind_entries;
501 }
502 }
503
504 /* Now compute the size of the stub unwinds. Note the ELF tools do not
505      use stub unwinds at the current time. */
506 stub_unwind_sec = bfd_get_section_by_name (objfile->obfd, "$UNWIND_END$");
507
508 if (stub_unwind_sec)
509 {
510 stub_unwind_size = bfd_section_size (objfile->obfd, stub_unwind_sec);
511 stub_entries = stub_unwind_size / STUB_UNWIND_ENTRY_SIZE;
512 }
513 else
514 {
515 stub_unwind_size = 0;
516 stub_entries = 0;
517 }
518
519 /* Compute total number of unwind entries and their total size. */
520 total_entries += stub_entries;
521 total_size = total_entries * sizeof (struct unwind_table_entry);
522
523 /* Allocate memory for the unwind table. */
524 ui->table = (struct unwind_table_entry *)
525 obstack_alloc (&objfile->objfile_obstack, total_size);
526 ui->last = total_entries - 1;
527
528 /* Now read in each unwind section and internalize the standard unwind
529 entries. */
530 index = 0;
531 for (unwind_sec = objfile->obfd->sections;
532 unwind_sec;
533 unwind_sec = unwind_sec->next)
534 {
535 if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
536 || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
537 {
538 unwind_size = bfd_section_size (objfile->obfd, unwind_sec);
539 unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;
540
541 internalize_unwinds (objfile, &ui->table[index], unwind_sec,
542 unwind_entries, unwind_size, text_offset);
543 index += unwind_entries;
544 }
545 }
546
547 /* Now read in and internalize the stub unwind entries. */
548 if (stub_unwind_size > 0)
549 {
550 unsigned int i;
551 char *buf = alloca (stub_unwind_size);
552
553 /* Read in the stub unwind entries. */
554 bfd_get_section_contents (objfile->obfd, stub_unwind_sec, buf,
555 0, stub_unwind_size);
556
557 /* Now convert them into regular unwind entries. */
558 for (i = 0; i < stub_entries; i++, index++)
559 {
560 /* Clear out the next unwind entry. */
561 memset (&ui->table[index], 0, sizeof (struct unwind_table_entry));
562
563 /* Convert offset & size into region_start and region_end.
564 Stuff away the stub type into "reserved" fields. */
565 ui->table[index].region_start = bfd_get_32 (objfile->obfd,
566 (bfd_byte *) buf);
567 ui->table[index].region_start += text_offset;
568 buf += 4;
569 ui->table[index].stub_unwind.stub_type = bfd_get_8 (objfile->obfd,
570 (bfd_byte *) buf);
571 buf += 2;
572 ui->table[index].region_end
573 = ui->table[index].region_start + 4 *
574 (bfd_get_16 (objfile->obfd, (bfd_byte *) buf) - 1);
575 buf += 2;
576 }
577
578 }
579
580 /* Unwind table needs to be kept sorted. */
581 qsort (ui->table, total_entries, sizeof (struct unwind_table_entry),
582 compare_unwind_entries);
583
584 /* Keep a pointer to the unwind information. */
585 obj_private = (struct hppa_objfile_private *)
586 objfile_data (objfile, hppa_objfile_priv_data);
587 if (obj_private == NULL)
588 {
589 obj_private = (struct hppa_objfile_private *)
590 obstack_alloc (&objfile->objfile_obstack,
591 sizeof (struct hppa_objfile_private));
592 set_objfile_data (objfile, hppa_objfile_priv_data, obj_private);
593 obj_private->unwind_info = NULL;
594 obj_private->so_info = NULL;
595 obj_private->dp = 0;
596 }
597 obj_private->unwind_info = ui;
598 }
599
600 /* Lookup the unwind (stack backtrace) info for the given PC. We search all
601 of the objfiles seeking the unwind table entry for this PC. Each objfile
602 contains a sorted list of struct unwind_table_entry. Since we do a binary
603 search of the unwind tables, we depend upon them to be sorted. */
604
605 struct unwind_table_entry *
606 find_unwind_entry (CORE_ADDR pc)
607 {
608 int first, middle, last;
609 struct objfile *objfile;
610 struct hppa_objfile_private *priv;
611
612 if (hppa_debug)
613 fprintf_unfiltered (gdb_stdlog, "{ find_unwind_entry 0x%s -> ",
614 paddr_nz (pc));
615
616 /* A function at address 0? Not in HP-UX! */
617 if (pc == (CORE_ADDR) 0)
618 {
619 if (hppa_debug)
620 fprintf_unfiltered (gdb_stdlog, "NULL }\n");
621 return NULL;
622 }
623
624 ALL_OBJFILES (objfile)
625 {
626 struct hppa_unwind_info *ui;
627 ui = NULL;
628 priv = objfile_data (objfile, hppa_objfile_priv_data);
629 if (priv)
630 ui = ((struct hppa_objfile_private *) priv)->unwind_info;
631
632 if (!ui)
633 {
634 read_unwind_info (objfile);
635 priv = objfile_data (objfile, hppa_objfile_priv_data);
636 if (priv == NULL)
637 error ("Internal error reading unwind information.");
638 ui = ((struct hppa_objfile_private *) priv)->unwind_info;
639 }
640
641 /* First, check the cache */
642
643 if (ui->cache
644 && pc >= ui->cache->region_start
645 && pc <= ui->cache->region_end)
646 {
647 if (hppa_debug)
648 fprintf_unfiltered (gdb_stdlog, "0x%s (cached) }\n",
649 paddr_nz ((CORE_ADDR) ui->cache));
650 return ui->cache;
651 }
652
653 /* Not in the cache, do a binary search */
654
655 first = 0;
656 last = ui->last;
657
658 while (first <= last)
659 {
660 middle = (first + last) / 2;
661 if (pc >= ui->table[middle].region_start
662 && pc <= ui->table[middle].region_end)
663 {
664 ui->cache = &ui->table[middle];
665 if (hppa_debug)
666 fprintf_unfiltered (gdb_stdlog, "0x%s }\n",
667 paddr_nz ((CORE_ADDR) ui->cache));
668 return &ui->table[middle];
669 }
670
671 if (pc < ui->table[middle].region_start)
672 last = middle - 1;
673 else
674 first = middle + 1;
675 }
676 } /* ALL_OBJFILES() */
677
678 if (hppa_debug)
679 fprintf_unfiltered (gdb_stdlog, "NULL (not found) }\n");
680
681 return NULL;
682 }
683
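/* Return the sequence of bytes (a PA-RISC BREAK instruction) that GDB
   plants at *PC as a software breakpoint; *LEN is set to its length.  */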
684 static const unsigned char *
685 hppa_breakpoint_from_pc (CORE_ADDR *pc, int *len)
686 {
687 static const unsigned char breakpoint[] = {0x00, 0x01, 0x00, 0x04};
688 (*len) = sizeof (breakpoint);
689 return breakpoint;
690 }
691
692 /* Return the name of a register. */
693
694 const char *
695 hppa32_register_name (int i)
696 {
697 static char *names[] = {
698 "flags", "r1", "rp", "r3",
699 "r4", "r5", "r6", "r7",
700 "r8", "r9", "r10", "r11",
701 "r12", "r13", "r14", "r15",
702 "r16", "r17", "r18", "r19",
703 "r20", "r21", "r22", "r23",
704 "r24", "r25", "r26", "dp",
705 "ret0", "ret1", "sp", "r31",
706 "sar", "pcoqh", "pcsqh", "pcoqt",
707 "pcsqt", "eiem", "iir", "isr",
708 "ior", "ipsw", "goto", "sr4",
709 "sr0", "sr1", "sr2", "sr3",
710 "sr5", "sr6", "sr7", "cr0",
711 "cr8", "cr9", "ccr", "cr12",
712 "cr13", "cr24", "cr25", "cr26",
713 "mpsfu_high","mpsfu_low","mpsfu_ovflo","pad",
714 "fpsr", "fpe1", "fpe2", "fpe3",
715 "fpe4", "fpe5", "fpe6", "fpe7",
716 "fr4", "fr4R", "fr5", "fr5R",
717 "fr6", "fr6R", "fr7", "fr7R",
718 "fr8", "fr8R", "fr9", "fr9R",
719 "fr10", "fr10R", "fr11", "fr11R",
720 "fr12", "fr12R", "fr13", "fr13R",
721 "fr14", "fr14R", "fr15", "fr15R",
722 "fr16", "fr16R", "fr17", "fr17R",
723 "fr18", "fr18R", "fr19", "fr19R",
724 "fr20", "fr20R", "fr21", "fr21R",
725 "fr22", "fr22R", "fr23", "fr23R",
726 "fr24", "fr24R", "fr25", "fr25R",
727 "fr26", "fr26R", "fr27", "fr27R",
728 "fr28", "fr28R", "fr29", "fr29R",
729 "fr30", "fr30R", "fr31", "fr31R"
730 };
731 if (i < 0 || i >= (sizeof (names) / sizeof (*names)))
732 return NULL;
733 else
734 return names[i];
735 }
736
737 const char *
738 hppa64_register_name (int i)
739 {
740 static char *names[] = {
741 "flags", "r1", "rp", "r3",
742 "r4", "r5", "r6", "r7",
743 "r8", "r9", "r10", "r11",
744 "r12", "r13", "r14", "r15",
745 "r16", "r17", "r18", "r19",
746 "r20", "r21", "r22", "r23",
747 "r24", "r25", "r26", "dp",
748 "ret0", "ret1", "sp", "r31",
749 "sar", "pcoqh", "pcsqh", "pcoqt",
750 "pcsqt", "eiem", "iir", "isr",
751 "ior", "ipsw", "goto", "sr4",
752 "sr0", "sr1", "sr2", "sr3",
753 "sr5", "sr6", "sr7", "cr0",
754 "cr8", "cr9", "ccr", "cr12",
755 "cr13", "cr24", "cr25", "cr26",
756 "mpsfu_high","mpsfu_low","mpsfu_ovflo","pad",
757 "fpsr", "fpe1", "fpe2", "fpe3",
758 "fr4", "fr5", "fr6", "fr7",
759 "fr8", "fr9", "fr10", "fr11",
760 "fr12", "fr13", "fr14", "fr15",
761 "fr16", "fr17", "fr18", "fr19",
762 "fr20", "fr21", "fr22", "fr23",
763 "fr24", "fr25", "fr26", "fr27",
764 "fr28", "fr29", "fr30", "fr31"
765 };
766 if (i < 0 || i >= (sizeof (names) / sizeof (*names)))
767 return NULL;
768 else
769 return names[i];
770 }
771
772 /* This function pushes a stack frame with arguments as part of the
773 inferior function calling mechanism.
774
775 This is the version of the function for the 32-bit PA machines, in
776 which later arguments appear at lower addresses. (The stack always
777 grows towards higher addresses.)
778
779 We simply allocate the appropriate amount of stack space and put
780 arguments into their proper slots. */
781
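/* As coded below, each argument occupies at least one word-aligned
   4-byte slot, and the slots that fall within the first 16 bytes are
   also copied into gr26 down through gr23, per the 32-bit runtime
   convention.  */
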
782 CORE_ADDR
783 hppa32_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
784 struct regcache *regcache, CORE_ADDR bp_addr,
785 int nargs, struct value **args, CORE_ADDR sp,
786 int struct_return, CORE_ADDR struct_addr)
787 {
788 /* NOTE: cagney/2004-02-27: This is a guess - its implemented by
789 reverse engineering testsuite failures. */
790
791 /* Stack base address at which any pass-by-reference parameters are
792 stored. */
793 CORE_ADDR struct_end = 0;
794 /* Stack base address at which the first parameter is stored. */
795 CORE_ADDR param_end = 0;
796
797 /* The inner most end of the stack after all the parameters have
798 been pushed. */
799 CORE_ADDR new_sp = 0;
800
801 /* Two passes. First pass computes the location of everything,
802 second pass writes the bytes out. */
803 int write_pass;
804 for (write_pass = 0; write_pass < 2; write_pass++)
805 {
806 CORE_ADDR struct_ptr = 0;
807 CORE_ADDR param_ptr = 0;
808 int reg = 27; /* NOTE: Registers go down. */
809 int i;
810 for (i = 0; i < nargs; i++)
811 {
812 struct value *arg = args[i];
813 struct type *type = check_typedef (VALUE_TYPE (arg));
814 /* The corresponding parameter that is pushed onto the
815 stack, and [possibly] passed in a register. */
816 char param_val[8];
817 int param_len;
818 memset (param_val, 0, sizeof param_val);
819 if (TYPE_LENGTH (type) > 8)
820 {
821 /* Large parameter, pass by reference. Store the value
822 in "struct" area and then pass its address. */
823 param_len = 4;
824 struct_ptr += align_up (TYPE_LENGTH (type), 8);
825 if (write_pass)
826 write_memory (struct_end - struct_ptr, VALUE_CONTENTS (arg),
827 TYPE_LENGTH (type));
828 store_unsigned_integer (param_val, 4, struct_end - struct_ptr);
829 }
830 else if (TYPE_CODE (type) == TYPE_CODE_INT
831 || TYPE_CODE (type) == TYPE_CODE_ENUM)
832 {
833 /* Integer value store, right aligned. "unpack_long"
834 takes care of any sign-extension problems. */
835 param_len = align_up (TYPE_LENGTH (type), 4);
836 store_unsigned_integer (param_val, param_len,
837 unpack_long (type,
838 VALUE_CONTENTS (arg)));
839 }
840 else
841 {
842 /* Small struct value, store right aligned? */
843 param_len = align_up (TYPE_LENGTH (type), 4);
844 memcpy (param_val + param_len - TYPE_LENGTH (type),
845 VALUE_CONTENTS (arg), TYPE_LENGTH (type));
846 }
847 param_ptr += param_len;
848 reg -= param_len / 4;
849 if (write_pass)
850 {
851 write_memory (param_end - param_ptr, param_val, param_len);
852 if (reg >= 23)
853 {
854 regcache_cooked_write (regcache, reg, param_val);
855 if (param_len > 4)
856 regcache_cooked_write (regcache, reg + 1, param_val + 4);
857 }
858 }
859 }
860
861 /* Update the various stack pointers. */
862 if (!write_pass)
863 {
864 struct_end = sp + struct_ptr;
865 /* PARAM_PTR already accounts for all the arguments passed
866 by the user. However, the ABI mandates minimum stack
867 space allocations for outgoing arguments. The ABI also
868 mandates minimum stack alignments which we must
869 preserve. */
870 param_end = struct_end + max (align_up (param_ptr, 8), 16);
871 }
872 }
873
874 /* If a structure has to be returned, set up register 28 to hold its
875 address */
876 if (struct_return)
877 write_register (28, struct_addr);
878
879 /* Set the return address. */
880 regcache_cooked_write_unsigned (regcache, RP_REGNUM, bp_addr);
881
882 /* Update the Stack Pointer. */
883 regcache_cooked_write_unsigned (regcache, SP_REGNUM, param_end + 32);
884
885 /* The stack will have 32 bytes of additional space for a frame marker. */
886 return param_end + 32;
887 }
888
889 /* This function pushes a stack frame with arguments as part of the
890 inferior function calling mechanism.
891
892 This is the version for the PA64, in which later arguments appear
893 at higher addresses. (The stack always grows towards higher
894 addresses.)
895
896 We simply allocate the appropriate amount of stack space and put
897 arguments into their proper slots.
898
899 This ABI also requires that the caller provide an argument pointer
900 to the callee, so we do that too. */
901
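/* As coded below, each argument occupies at least one 8-byte slot, and
   the slots that fall within the first 64 bytes are also copied into
   gr26 down through gr19, per the 64-bit runtime convention.  */
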
902 CORE_ADDR
903 hppa64_push_dummy_call (struct gdbarch *gdbarch, CORE_ADDR func_addr,
904 struct regcache *regcache, CORE_ADDR bp_addr,
905 int nargs, struct value **args, CORE_ADDR sp,
906 int struct_return, CORE_ADDR struct_addr)
907 {
908 /* NOTE: cagney/2004-02-27: This is a guess - its implemented by
909 reverse engineering testsuite failures. */
910
911 /* Stack base address at which any pass-by-reference parameters are
912 stored. */
913 CORE_ADDR struct_end = 0;
914 /* Stack base address at which the first parameter is stored. */
915 CORE_ADDR param_end = 0;
916
917 /* The inner most end of the stack after all the parameters have
918 been pushed. */
919 CORE_ADDR new_sp = 0;
920
921 /* Two passes. First pass computes the location of everything,
922 second pass writes the bytes out. */
923 int write_pass;
924 for (write_pass = 0; write_pass < 2; write_pass++)
925 {
926 CORE_ADDR struct_ptr = 0;
927 CORE_ADDR param_ptr = 0;
928 int i;
929 for (i = 0; i < nargs; i++)
930 {
931 struct value *arg = args[i];
932 struct type *type = check_typedef (VALUE_TYPE (arg));
933 if ((TYPE_CODE (type) == TYPE_CODE_INT
934 || TYPE_CODE (type) == TYPE_CODE_ENUM)
935 && TYPE_LENGTH (type) <= 8)
936 {
937 /* Integer value store, right aligned. "unpack_long"
938 takes care of any sign-extension problems. */
939 param_ptr += 8;
940 if (write_pass)
941 {
942 ULONGEST val = unpack_long (type, VALUE_CONTENTS (arg));
943 int reg = 27 - param_ptr / 8;
944 write_memory_unsigned_integer (param_end - param_ptr,
945 val, 8);
946 if (reg >= 19)
947 regcache_cooked_write_unsigned (regcache, reg, val);
948 }
949 }
950 else
951 {
952 /* Small struct value, store left aligned? */
953 int reg;
954 if (TYPE_LENGTH (type) > 8)
955 {
956 param_ptr = align_up (param_ptr, 16);
957 reg = 26 - param_ptr / 8;
958 param_ptr += align_up (TYPE_LENGTH (type), 16);
959 }
960 else
961 {
962 param_ptr = align_up (param_ptr, 8);
963 reg = 26 - param_ptr / 8;
964 param_ptr += align_up (TYPE_LENGTH (type), 8);
965 }
966 if (write_pass)
967 {
968 int byte;
969 write_memory (param_end - param_ptr, VALUE_CONTENTS (arg),
970 TYPE_LENGTH (type));
971 for (byte = 0; byte < TYPE_LENGTH (type); byte += 8)
972 {
973 if (reg >= 19)
974 {
975 int len = min (8, TYPE_LENGTH (type) - byte);
976 regcache_cooked_write_part (regcache, reg, 0, len,
977 VALUE_CONTENTS (arg) + byte);
978 }
979 reg--;
980 }
981 }
982 }
983 }
984 /* Update the various stack pointers. */
985 if (!write_pass)
986 {
987 struct_end = sp + struct_ptr;
988 /* PARAM_PTR already accounts for all the arguments passed
989 by the user. However, the ABI mandates minimum stack
990 space allocations for outgoing arguments. The ABI also
991 mandates minimum stack alignments which we must
992 preserve. */
993 param_end = struct_end + max (align_up (param_ptr, 16), 64);
994 }
995 }
996
997 /* If a structure has to be returned, set up register 28 to hold its
998 address */
999 if (struct_return)
1000 write_register (28, struct_addr);
1001
1002 /* Set the return address. */
1003 regcache_cooked_write_unsigned (regcache, RP_REGNUM, bp_addr);
1004
1005 /* Update the Stack Pointer. */
1006 regcache_cooked_write_unsigned (regcache, SP_REGNUM, param_end + 64);
1007
1008   /* The stack will have 64 bytes of additional space for a frame marker. */
1009 return param_end + 64;
1010 }
1011
1012 static CORE_ADDR
1013 hppa32_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr)
1014 {
1015 /* HP frames are 64-byte (or cache line) aligned (yes that's _byte_
1016 and not _bit_)! */
1017 return align_up (addr, 64);
1018 }
1019
1020 /* Force all frames to 16-byte alignment. Better safe than sorry. */
1021
1022 static CORE_ADDR
1023 hppa64_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr)
1024 {
1025 /* Just always 16-byte align. */
1026 return align_up (addr, 16);
1027 }
1028
1029
1030 /* Get the PC from %r31 if currently in a syscall. Also mask out privilege
1031 bits. */
1032
1033 static CORE_ADDR
1034 hppa_target_read_pc (ptid_t ptid)
1035 {
1036 int flags = read_register_pid (FLAGS_REGNUM, ptid);
1037
1038 /* The following test does not belong here. It is OS-specific, and belongs
1039 in native code. */
1040 /* Test SS_INSYSCALL */
1041 if (flags & 2)
1042 return read_register_pid (31, ptid) & ~0x3;
1043
1044 return read_register_pid (PCOQ_HEAD_REGNUM, ptid) & ~0x3;
1045 }
1046
1047 /* Write out the PC. If currently in a syscall, then also write the new
1048 PC value into %r31. */
1049
1050 static void
1051 hppa_target_write_pc (CORE_ADDR v, ptid_t ptid)
1052 {
1053 int flags = read_register_pid (FLAGS_REGNUM, ptid);
1054
1055 /* The following test does not belong here. It is OS-specific, and belongs
1056 in native code. */
1057 /* If in a syscall, then set %r31. Also make sure to get the
1058 privilege bits set correctly. */
1059 /* Test SS_INSYSCALL */
1060 if (flags & 2)
1061 write_register_pid (31, v | 0x3, ptid);
1062
1063 write_register_pid (PCOQ_HEAD_REGNUM, v, ptid);
1064 write_register_pid (PCOQ_TAIL_REGNUM, v + 4, ptid);
1065 }
1066
1067 /* Return the alignment of a type in bytes. Structures have the maximum
1068 alignment required by their fields. */
1069
1070 static int
1071 hppa_alignof (struct type *type)
1072 {
1073 int max_align, align, i;
1074 CHECK_TYPEDEF (type);
1075 switch (TYPE_CODE (type))
1076 {
1077 case TYPE_CODE_PTR:
1078 case TYPE_CODE_INT:
1079 case TYPE_CODE_FLT:
1080 return TYPE_LENGTH (type);
1081 case TYPE_CODE_ARRAY:
1082 return hppa_alignof (TYPE_FIELD_TYPE (type, 0));
1083 case TYPE_CODE_STRUCT:
1084 case TYPE_CODE_UNION:
1085 max_align = 1;
1086 for (i = 0; i < TYPE_NFIELDS (type); i++)
1087 {
1088 /* Bit fields have no real alignment. */
1089 /* if (!TYPE_FIELD_BITPOS (type, i)) */
1090 if (!TYPE_FIELD_BITSIZE (type, i)) /* elz: this should be bitsize */
1091 {
1092 align = hppa_alignof (TYPE_FIELD_TYPE (type, i));
1093 max_align = max (max_align, align);
1094 }
1095 }
1096 return max_align;
1097 default:
1098 return 4;
1099 }
1100 }
1101
1102 /* Return one if PC is in the call path of a trampoline, else return zero.
1103
1104 Note we return one for *any* call trampoline (long-call, arg-reloc), not
1105 just shared library trampolines (import, export). */
1106
1107 static int
1108 hppa_in_solib_call_trampoline (CORE_ADDR pc, char *name)
1109 {
1110 struct minimal_symbol *minsym;
1111 struct unwind_table_entry *u;
1112 static CORE_ADDR dyncall = 0;
1113 static CORE_ADDR sr4export = 0;
1114
1115 #ifdef GDB_TARGET_IS_HPPA_20W
1116 /* PA64 has a completely different stub/trampoline scheme. Is it
1117 better? Maybe. It's certainly harder to determine with any
1118 certainty that we are in a stub because we can not refer to the
1119 unwinders to help.
1120
1121      The heuristic is simple. Try to look up the current PC value in the
1122 minimal symbol table. If that fails, then assume we are not in a
1123 stub and return.
1124
1125 Then see if the PC value falls within the section bounds for the
1126 section containing the minimal symbol we found in the first
1127 step. If it does, then assume we are not in a stub and return.
1128
1129 Finally peek at the instructions to see if they look like a stub. */
1130 {
1131 struct minimal_symbol *minsym;
1132 asection *sec;
1133 CORE_ADDR addr;
1134 int insn, i;
1135
1136 minsym = lookup_minimal_symbol_by_pc (pc);
1137 if (! minsym)
1138 return 0;
1139
1140 sec = SYMBOL_BFD_SECTION (minsym);
1141
1142 if (bfd_get_section_vma (sec->owner, sec) <= pc
1143 && pc < (bfd_get_section_vma (sec->owner, sec)
1144 + bfd_section_size (sec->owner, sec)))
1145 return 0;
1146
1147 /* We might be in a stub. Peek at the instructions. Stubs are 3
1148 instructions long. */
1149 insn = read_memory_integer (pc, 4);
1150
1151 /* Find out where we think we are within the stub. */
1152 if ((insn & 0xffffc00e) == 0x53610000)
1153 addr = pc;
1154 else if ((insn & 0xffffffff) == 0xe820d000)
1155 addr = pc - 4;
1156 else if ((insn & 0xffffc00e) == 0x537b0000)
1157 addr = pc - 8;
1158 else
1159 return 0;
1160
1161 /* Now verify each insn in the range looks like a stub instruction. */
1162 insn = read_memory_integer (addr, 4);
1163 if ((insn & 0xffffc00e) != 0x53610000)
1164 return 0;
1165
1166 /* Now verify each insn in the range looks like a stub instruction. */
1167 insn = read_memory_integer (addr + 4, 4);
1168 if ((insn & 0xffffffff) != 0xe820d000)
1169 return 0;
1170
1171 /* Now verify each insn in the range looks like a stub instruction. */
1172 insn = read_memory_integer (addr + 8, 4);
1173 if ((insn & 0xffffc00e) != 0x537b0000)
1174 return 0;
1175
1176 /* Looks like a stub. */
1177 return 1;
1178 }
1179 #endif
1180
1181 /* FIXME XXX - dyncall and sr4export must be initialized whenever we get a
1182 new exec file */
1183
1184 /* First see if PC is in one of the two C-library trampolines. */
1185 if (!dyncall)
1186 {
1187 minsym = lookup_minimal_symbol ("$$dyncall", NULL, NULL);
1188 if (minsym)
1189 dyncall = SYMBOL_VALUE_ADDRESS (minsym);
1190 else
1191 dyncall = -1;
1192 }
1193
1194 if (!sr4export)
1195 {
1196 minsym = lookup_minimal_symbol ("_sr4export", NULL, NULL);
1197 if (minsym)
1198 sr4export = SYMBOL_VALUE_ADDRESS (minsym);
1199 else
1200 sr4export = -1;
1201 }
1202
1203 if (pc == dyncall || pc == sr4export)
1204 return 1;
1205
1206 minsym = lookup_minimal_symbol_by_pc (pc);
1207 if (minsym && strcmp (DEPRECATED_SYMBOL_NAME (minsym), ".stub") == 0)
1208 return 1;
1209
1210 /* Get the unwind descriptor corresponding to PC, return zero
1211 if no unwind was found. */
1212 u = find_unwind_entry (pc);
1213 if (!u)
1214 return 0;
1215
1216 /* If this isn't a linker stub, then return now. */
1217 if (u->stub_unwind.stub_type == 0)
1218 return 0;
1219
1220 /* By definition a long-branch stub is a call stub. */
1221 if (u->stub_unwind.stub_type == LONG_BRANCH)
1222 return 1;
1223
1224 /* The call and return path execute the same instructions within
1225 an IMPORT stub! So an IMPORT stub is both a call and return
1226 trampoline. */
1227 if (u->stub_unwind.stub_type == IMPORT)
1228 return 1;
1229
1230 /* Parameter relocation stubs always have a call path and may have a
1231 return path. */
1232 if (u->stub_unwind.stub_type == PARAMETER_RELOCATION
1233 || u->stub_unwind.stub_type == EXPORT)
1234 {
1235 CORE_ADDR addr;
1236
1237 /* Search forward from the current PC until we hit a branch
1238 or the end of the stub. */
1239 for (addr = pc; addr <= u->region_end; addr += 4)
1240 {
1241 unsigned long insn;
1242
1243 insn = read_memory_integer (addr, 4);
1244
1245 /* Does it look like a bl? If so then it's the call path, if
1246 we find a bv or be first, then we're on the return path. */
1247 if ((insn & 0xfc00e000) == 0xe8000000)
1248 return 1;
1249 else if ((insn & 0xfc00e001) == 0xe800c000
1250 || (insn & 0xfc000000) == 0xe0000000)
1251 return 0;
1252 }
1253
1254 /* Should never happen. */
1255 warning ("Unable to find branch in parameter relocation stub.\n");
1256 return 0;
1257 }
1258
1259 /* Unknown stub type. For now, just return zero. */
1260 return 0;
1261 }
1262
1263 /* Return one if PC is in the return path of a trampoline, else return zero.
1264
1265 Note we return one for *any* call trampoline (long-call, arg-reloc), not
1266 just shared library trampolines (import, export). */
1267
1268 static int
1269 hppa_in_solib_return_trampoline (CORE_ADDR pc, char *name)
1270 {
1271 struct unwind_table_entry *u;
1272
1273 /* Get the unwind descriptor corresponding to PC, return zero
1274 if no unwind was found. */
1275 u = find_unwind_entry (pc);
1276 if (!u)
1277 return 0;
1278
1279 /* If this isn't a linker stub or it's just a long branch stub, then
1280 return zero. */
1281 if (u->stub_unwind.stub_type == 0 || u->stub_unwind.stub_type == LONG_BRANCH)
1282 return 0;
1283
1284 /* The call and return path execute the same instructions within
1285 an IMPORT stub! So an IMPORT stub is both a call and return
1286 trampoline. */
1287 if (u->stub_unwind.stub_type == IMPORT)
1288 return 1;
1289
1290 /* Parameter relocation stubs always have a call path and may have a
1291 return path. */
1292 if (u->stub_unwind.stub_type == PARAMETER_RELOCATION
1293 || u->stub_unwind.stub_type == EXPORT)
1294 {
1295 CORE_ADDR addr;
1296
1297 /* Search forward from the current PC until we hit a branch
1298 or the end of the stub. */
1299 for (addr = pc; addr <= u->region_end; addr += 4)
1300 {
1301 unsigned long insn;
1302
1303 insn = read_memory_integer (addr, 4);
1304
1305 /* Does it look like a bl? If so then it's the call path, if
1306 we find a bv or be first, then we're on the return path. */
1307 if ((insn & 0xfc00e000) == 0xe8000000)
1308 return 0;
1309 else if ((insn & 0xfc00e001) == 0xe800c000
1310 || (insn & 0xfc000000) == 0xe0000000)
1311 return 1;
1312 }
1313
1314 /* Should never happen. */
1315 warning ("Unable to find branch in parameter relocation stub.\n");
1316 return 0;
1317 }
1318
1319 /* Unknown stub type. For now, just return zero. */
1320 return 0;
1321
1322 }
1323
1324 /* Figure out if PC is in a trampoline, and if so find out where
1325 the trampoline will jump to. If not in a trampoline, return zero.
1326
1327 Simple code examination probably is not a good idea since the code
1328 sequences in trampolines can also appear in user code.
1329
1330 We use unwinds and information from the minimal symbol table to
1331 determine when we're in a trampoline. This won't work for ELF
1332 (yet) since it doesn't create stub unwind entries. Whether or
1333 not ELF will create stub unwinds or normal unwinds for linker
1334 stubs is still being debated.
1335
1336 This should handle simple calls through dyncall or sr4export,
1337 long calls, argument relocation stubs, and dyncall/sr4export
1338 calling an argument relocation stub. It even handles some stubs
1339 used in dynamic executables. */
1340
1341 static CORE_ADDR
1342 hppa_skip_trampoline_code (CORE_ADDR pc)
1343 {
1344 long orig_pc = pc;
1345 long prev_inst, curr_inst, loc;
1346 static CORE_ADDR dyncall = 0;
1347 static CORE_ADDR dyncall_external = 0;
1348 static CORE_ADDR sr4export = 0;
1349 struct minimal_symbol *msym;
1350 struct unwind_table_entry *u;
1351
1352 /* FIXME XXX - dyncall and sr4export must be initialized whenever we get a
1353 new exec file */
1354
1355 if (!dyncall)
1356 {
1357 msym = lookup_minimal_symbol ("$$dyncall", NULL, NULL);
1358 if (msym)
1359 dyncall = SYMBOL_VALUE_ADDRESS (msym);
1360 else
1361 dyncall = -1;
1362 }
1363
1364 if (!dyncall_external)
1365 {
1366 msym = lookup_minimal_symbol ("$$dyncall_external", NULL, NULL);
1367 if (msym)
1368 dyncall_external = SYMBOL_VALUE_ADDRESS (msym);
1369 else
1370 dyncall_external = -1;
1371 }
1372
1373 if (!sr4export)
1374 {
1375 msym = lookup_minimal_symbol ("_sr4export", NULL, NULL);
1376 if (msym)
1377 sr4export = SYMBOL_VALUE_ADDRESS (msym);
1378 else
1379 sr4export = -1;
1380 }
1381
1382 /* Addresses passed to dyncall may *NOT* be the actual address
1383 of the function. So we may have to do something special. */
1384 if (pc == dyncall)
1385 {
1386 pc = (CORE_ADDR) read_register (22);
1387
1388 /* If bit 30 (counting from the left) is on, then pc is the address of
1389 the PLT entry for this function, not the address of the function
1390 itself. Bit 31 has meaning too, but only for MPE. */
1391 if (pc & 0x2)
1392 pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, TARGET_PTR_BIT / 8);
1393 }
1394 if (pc == dyncall_external)
1395 {
1396 pc = (CORE_ADDR) read_register (22);
1397 pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, TARGET_PTR_BIT / 8);
1398 }
1399 else if (pc == sr4export)
1400 pc = (CORE_ADDR) (read_register (22));
1401
1402 /* Get the unwind descriptor corresponding to PC, return zero
1403 if no unwind was found. */
1404 u = find_unwind_entry (pc);
1405 if (!u)
1406 return 0;
1407
1408 /* If this isn't a linker stub, then return now. */
1409 /* elz: attention here! (FIXME) because of a compiler/linker
1410 error, some stubs which should have a non zero stub_unwind.stub_type
1411 have unfortunately a value of zero. So this function would return here
1412 as if we were not in a trampoline. To fix this, we go look at the partial
1413 symbol information, which reports this guy as a stub.
1414 (FIXME): Unfortunately, we are not that lucky: it turns out that the
1415 partial symbol information is also wrong sometimes. This is because
1416 when it is entered (somread.c::som_symtab_read()) it can happen that
1417 if the type of the symbol (from the som) is Entry, and the symbol is
1418 in a shared library, then it can also be a trampoline. This would
1419      be OK, except that I believe the way they decide if we are in a shared library
1420 does not work. SOOOO..., even if we have a regular function w/o trampolines
1421 its minimal symbol can be assigned type mst_solib_trampoline.
1422 Also, if we find that the symbol is a real stub, then we fix the unwind
1423 descriptor, and define the stub type to be EXPORT.
1424 Hopefully this is correct most of the times. */
1425 if (u->stub_unwind.stub_type == 0)
1426 {
1427
1428 /* elz: NOTE (FIXME!) once the problem with the unwind information is fixed
1429 we can delete all the code which appears between the lines */
1430 /*--------------------------------------------------------------------------*/
1431 msym = lookup_minimal_symbol_by_pc (pc);
1432
1433 if (msym == NULL || MSYMBOL_TYPE (msym) != mst_solib_trampoline)
1434 return orig_pc == pc ? 0 : pc & ~0x3;
1435
1436 else if (msym != NULL && MSYMBOL_TYPE (msym) == mst_solib_trampoline)
1437 {
1438 struct objfile *objfile;
1439 struct minimal_symbol *msymbol;
1440 int function_found = 0;
1441
1442 /* go look if there is another minimal symbol with the same name as
1443 this one, but with type mst_text. This would happen if the msym
1444 is an actual trampoline, in which case there would be another
1445 symbol with the same name corresponding to the real function */
1446
1447 ALL_MSYMBOLS (objfile, msymbol)
1448 {
1449 if (MSYMBOL_TYPE (msymbol) == mst_text
1450 && DEPRECATED_STREQ (DEPRECATED_SYMBOL_NAME (msymbol), DEPRECATED_SYMBOL_NAME (msym)))
1451 {
1452 function_found = 1;
1453 break;
1454 }
1455 }
1456
1457 if (function_found)
1458 /* the type of msym is correct (mst_solib_trampoline), but
1459 the unwind info is wrong, so set it to the correct value */
1460 u->stub_unwind.stub_type = EXPORT;
1461 else
1462 /* the stub type info in the unwind is correct (this is not a
1463 trampoline), but the msym type information is wrong, it
1464 should be mst_text. So we need to fix the msym, and also
1465 get out of this function */
1466 {
1467 MSYMBOL_TYPE (msym) = mst_text;
1468 return orig_pc == pc ? 0 : pc & ~0x3;
1469 }
1470 }
1471
1472 /*--------------------------------------------------------------------------*/
1473 }
1474
1475 /* It's a stub. Search for a branch and figure out where it goes.
1476 Note we have to handle multi insn branch sequences like ldil;ble.
1477 Most (all?) other branches can be determined by examining the contents
1478 of certain registers and the stack. */
1479
1480 loc = pc;
1481 curr_inst = 0;
1482 prev_inst = 0;
1483 while (1)
1484 {
1485 /* Make sure we haven't walked outside the range of this stub. */
1486 if (u != find_unwind_entry (loc))
1487 {
1488 warning ("Unable to find branch in linker stub");
1489 return orig_pc == pc ? 0 : pc & ~0x3;
1490 }
1491
1492 prev_inst = curr_inst;
1493 curr_inst = read_memory_integer (loc, 4);
1494
1495 /* Does it look like a branch external using %r1? Then it's the
1496 branch from the stub to the actual function. */
1497 if ((curr_inst & 0xffe0e000) == 0xe0202000)
1498 {
1499 /* Yup. See if the previous instruction loaded
1500 a value into %r1. If so compute and return the jump address. */
1501 if ((prev_inst & 0xffe00000) == 0x20200000)
1502 return (extract_21 (prev_inst) + extract_17 (curr_inst)) & ~0x3;
1503 else
1504 {
1505 warning ("Unable to find ldil X,%%r1 before ble Y(%%sr4,%%r1).");
1506 return orig_pc == pc ? 0 : pc & ~0x3;
1507 }
1508 }
1509
1510 /* Does it look like a be 0(sr0,%r21)? OR
1511 Does it look like a be, n 0(sr0,%r21)? OR
1512 Does it look like a bve (r21)? (this is on PA2.0)
1513 Does it look like a bve, n(r21)? (this is also on PA2.0)
1514 That's the branch from an
1515 import stub to an export stub.
1516
1517 It is impossible to determine the target of the branch via
1518 simple examination of instructions and/or data (consider
1519 that the address in the plabel may be the address of the
1520 bind-on-reference routine in the dynamic loader).
1521
1522         So we have to try an alternative approach.
1523
1524 Get the name of the symbol at our current location; it should
1525 be a stub symbol with the same name as the symbol in the
1526 shared library.
1527
1528 Then lookup a minimal symbol with the same name; we should
1529 get the minimal symbol for the target routine in the shared
1530         library as those take precedence over import/export stubs. */
1531 if ((curr_inst == 0xe2a00000) ||
1532 (curr_inst == 0xe2a00002) ||
1533 (curr_inst == 0xeaa0d000) ||
1534 (curr_inst == 0xeaa0d002))
1535 {
1536 struct minimal_symbol *stubsym, *libsym;
1537
1538 stubsym = lookup_minimal_symbol_by_pc (loc);
1539 if (stubsym == NULL)
1540 {
1541 warning ("Unable to find symbol for 0x%lx", loc);
1542 return orig_pc == pc ? 0 : pc & ~0x3;
1543 }
1544
1545 libsym = lookup_minimal_symbol (DEPRECATED_SYMBOL_NAME (stubsym), NULL, NULL);
1546 if (libsym == NULL)
1547 {
1548 warning ("Unable to find library symbol for %s\n",
1549 DEPRECATED_SYMBOL_NAME (stubsym));
1550 return orig_pc == pc ? 0 : pc & ~0x3;
1551 }
1552
1553 return SYMBOL_VALUE (libsym);
1554 }
1555
1556 /* Does it look like bl X,%rp or bl X,%r0? Another way to do a
1557 branch from the stub to the actual function. */
1558 /*elz */
1559 else if ((curr_inst & 0xffe0e000) == 0xe8400000
1560 || (curr_inst & 0xffe0e000) == 0xe8000000
1561 || (curr_inst & 0xffe0e000) == 0xe800A000)
1562 return (loc + extract_17 (curr_inst) + 8) & ~0x3;
1563
1564 /* Does it look like bv (rp)? Note this depends on the
1565 current stack pointer being the same as the stack
1566 pointer in the stub itself! This is a branch on from the
1567 stub back to the original caller. */
1568 /*else if ((curr_inst & 0xffe0e000) == 0xe840c000) */
1569 else if ((curr_inst & 0xffe0f000) == 0xe840c000)
1570 {
1571 /* Yup. See if the previous instruction loaded
1572 rp from sp - 8. */
1573 if (prev_inst == 0x4bc23ff1)
1574 return (read_memory_integer
1575 (read_register (HPPA_SP_REGNUM) - 8, 4)) & ~0x3;
1576 else
1577 {
1578 warning ("Unable to find restore of %%rp before bv (%%rp).");
1579 return orig_pc == pc ? 0 : pc & ~0x3;
1580 }
1581 }
1582
1583 /* elz: added this case to capture the new instruction
1584 at the end of the return part of an export stub used by
1585 the PA2.0: BVE, n (rp) */
1586 else if ((curr_inst & 0xffe0f000) == 0xe840d000)
1587 {
1588 return (read_memory_integer
1589 (read_register (HPPA_SP_REGNUM) - 24, TARGET_PTR_BIT / 8)) & ~0x3;
1590 }
1591
1592 /* What about be,n 0(sr0,%rp)? It's just another way we return to
1593 the original caller from the stub. Used in dynamic executables. */
1594 else if (curr_inst == 0xe0400002)
1595 {
1596 /* The value we jump to is sitting in sp - 24. But that's
1597 loaded several instructions before the be instruction.
1598 I guess we could check for the previous instruction being
1599 mtsp %r1,%sr0 if we want to do sanity checking. */
1600 return (read_memory_integer
1601 (read_register (HPPA_SP_REGNUM) - 24, TARGET_PTR_BIT / 8)) & ~0x3;
1602 }
1603
1604 /* Haven't found the branch yet, but we're still in the stub.
1605 Keep looking. */
1606 loc += 4;
1607 }
1608 }
1609
1610
1611 /* For the given instruction (INST), return any adjustment it makes
1612 to the stack pointer or zero for no adjustment.
1613
1614 This only handles instructions commonly found in prologues. */
1615
1616 static int
1617 prologue_inst_adjust_sp (unsigned long inst)
1618 {
1619 /* This must persist across calls. */
1620 static int save_high21;
1621
1622 /* The most common way to perform a stack adjustment ldo X(sp),sp */
1623 if ((inst & 0xffffc000) == 0x37de0000)
1624 return extract_14 (inst);
1625
1626 /* stwm X,D(sp) */
1627 if ((inst & 0xffe00000) == 0x6fc00000)
1628 return extract_14 (inst);
1629
1630 /* std,ma X,D(sp) */
1631 if ((inst & 0xffe00008) == 0x73c00008)
1632 return (inst & 0x1 ? -1 << 13 : 0) | (((inst >> 4) & 0x3ff) << 3);
1633
1634   /* addil high21,%r1; ldo low11(%r1),%r30 --
1635 save high bits in save_high21 for later use. */
1636 if ((inst & 0xffe00000) == 0x28200000)
1637 {
1638 save_high21 = extract_21 (inst);
1639 return 0;
1640 }
1641
1642 if ((inst & 0xffff0000) == 0x343e0000)
1643 return save_high21 + extract_14 (inst);
1644
1645 /* fstws as used by the HP compilers. */
1646 if ((inst & 0xffffffe0) == 0x2fd01220)
1647 return extract_5_load (inst);
1648
1649 /* No adjustment. */
1650 return 0;
1651 }
1652
1653 /* Return nonzero if INST is a branch of some kind, else return zero. */
1654
1655 static int
1656 is_branch (unsigned long inst)
1657 {
1658 switch (inst >> 26)
1659 {
1660 case 0x20:
1661 case 0x21:
1662 case 0x22:
1663 case 0x23:
1664 case 0x27:
1665 case 0x28:
1666 case 0x29:
1667 case 0x2a:
1668 case 0x2b:
1669 case 0x2f:
1670 case 0x30:
1671 case 0x31:
1672 case 0x32:
1673 case 0x33:
1674 case 0x38:
1675 case 0x39:
1676 case 0x3a:
1677 case 0x3b:
1678 return 1;
1679
1680 default:
1681 return 0;
1682 }
1683 }
1684
1685 /* Return the register number for a GR which is saved by INST or
1686    zero if INST does not save a GR. */
1687
1688 static int
1689 inst_saves_gr (unsigned long inst)
1690 {
1691 /* Does it look like a stw? */
1692 if ((inst >> 26) == 0x1a || (inst >> 26) == 0x1b
1693 || (inst >> 26) == 0x1f
1694 || ((inst >> 26) == 0x1f
1695 && ((inst >> 6) == 0xa)))
1696 return extract_5R_store (inst);
1697
1698 /* Does it look like a std? */
1699 if ((inst >> 26) == 0x1c
1700 || ((inst >> 26) == 0x03
1701 && ((inst >> 6) & 0xf) == 0xb))
1702 return extract_5R_store (inst);
1703
1704 /* Does it look like a stwm? GCC & HPC may use this in prologues. */
1705 if ((inst >> 26) == 0x1b)
1706 return extract_5R_store (inst);
1707
1708 /* Does it look like sth or stb? HPC versions 9.0 and later use these
1709 too. */
1710 if ((inst >> 26) == 0x19 || (inst >> 26) == 0x18
1711 || ((inst >> 26) == 0x3
1712 && (((inst >> 6) & 0xf) == 0x8
1713           || ((inst >> 6) & 0xf) == 0x9)))
1714 return extract_5R_store (inst);
1715
1716 return 0;
1717 }
1718
1719 /* Return the register number for a FR which is saved by INST or
1720    zero if INST does not save a FR.
1721
1722 Note we only care about full 64bit register stores (that's the only
1723 kind of stores the prologue will use).
1724
1725 FIXME: What about argument stores with the HP compiler in ANSI mode? */
1726
1727 static int
1728 inst_saves_fr (unsigned long inst)
1729 {
1730 /* is this an FSTD ? */
1731 if ((inst & 0xfc00dfc0) == 0x2c001200)
1732 return extract_5r_store (inst);
1733 if ((inst & 0xfc000002) == 0x70000002)
1734 return extract_5R_store (inst);
1735 /* is this an FSTW ? */
1736 if ((inst & 0xfc00df80) == 0x24001200)
1737 return extract_5r_store (inst);
1738 if ((inst & 0xfc000002) == 0x7c000000)
1739 return extract_5R_store (inst);
1740 return 0;
1741 }
1742
1743 /* Advance PC across any function entry prologue instructions
1744 to reach some "real" code.
1745
1746 Use information in the unwind table to determine what exactly should
1747 be in the prologue. */
1748
1749
1750 CORE_ADDR
1751 skip_prologue_hard_way (CORE_ADDR pc)
1752 {
1753 char buf[4];
1754 CORE_ADDR orig_pc = pc;
1755 unsigned long inst, stack_remaining, save_gr, save_fr, save_rp, save_sp;
1756 unsigned long args_stored, status, i, restart_gr, restart_fr;
1757 struct unwind_table_entry *u;
1758
1759 restart_gr = 0;
1760 restart_fr = 0;
1761
1762 restart:
1763 u = find_unwind_entry (pc);
1764 if (!u)
1765 return pc;
1766
1767 /* If we are not at the beginning of a function, then return now. */
1768 if ((pc & ~0x3) != u->region_start)
1769 return pc;
1770
1771 /* This is how much of a frame adjustment we need to account for. */
1772 stack_remaining = u->Total_frame_size << 3;
1773
1774 /* Magic register saves we want to know about. */
1775 save_rp = u->Save_RP;
1776 save_sp = u->Save_SP;
1777
1778 /* An indication that args may be stored into the stack. Unfortunately
1779 the HPUX compilers tend to set this even in cases where no args
1780 were stored. */
1781 args_stored = 1;
1782
1783 /* Turn the Entry_GR field into a bitmask. */
1784 save_gr = 0;
1785 for (i = 3; i < u->Entry_GR + 3; i++)
1786 {
1787 /* Frame pointer gets saved into a special location. */
1788 if (u->Save_SP && i == HPPA_FP_REGNUM)
1789 continue;
1790
1791 save_gr |= (1 << i);
1792 }
1793 save_gr &= ~restart_gr;
1794
1795 /* Turn the Entry_FR field into a bitmask too. */
1796 save_fr = 0;
1797 for (i = 12; i < u->Entry_FR + 12; i++)
1798 save_fr |= (1 << i);
1799 save_fr &= ~restart_fr;
1800
1801 /* Loop until we find everything of interest or hit a branch.
1802
1803 For unoptimized GCC code and for any HP CC code this will never ever
1804 examine any user instructions.
1805
1806 For optimized GCC code we're faced with problems. GCC will schedule
1807 its prologue and make prologue instructions available for delay slot
1808 filling. The end result is user code gets mixed in with the prologue
1809 and a prologue instruction may be in the delay slot of the first branch
1810 or call.
1811
1812 Some unexpected things are expected with debugging optimized code, so
1813 we allow this routine to walk past user instructions in optimized
1814 GCC code. */
1815 while (save_gr || save_fr || save_rp || save_sp || stack_remaining > 0
1816 || args_stored)
1817 {
1818 unsigned int reg_num;
1819 unsigned long old_stack_remaining, old_save_gr, old_save_fr;
1820 unsigned long old_save_rp, old_save_sp, next_inst;
1821
1822 /* Save copies of all the triggers so we can compare them later
1823 (only for HPC). */
1824 old_save_gr = save_gr;
1825 old_save_fr = save_fr;
1826 old_save_rp = save_rp;
1827 old_save_sp = save_sp;
1828 old_stack_remaining = stack_remaining;
1829
1830 status = target_read_memory (pc, buf, 4);
1831 inst = extract_unsigned_integer (buf, 4);
1832
1833 /* Yow! */
1834 if (status != 0)
1835 return pc;
1836
1837 /* Note the interesting effects of this instruction. */
1838 stack_remaining -= prologue_inst_adjust_sp (inst);
1839
1840 /* There are limited ways to store the return pointer into the
1841 stack. */
1842 if (inst == 0x6bc23fd9 || inst == 0x0fc212c1)
1843 save_rp = 0;
1844
1845 /* These are the only ways we save SP into the stack. At this time
1846 the HP compilers never bother to save SP into the stack. */
1847 if ((inst & 0xffffc000) == 0x6fc10000
1848 || (inst & 0xffffc00c) == 0x73c10008)
1849 save_sp = 0;
1850
1851 /* Are we loading some register with an offset from the argument
1852 pointer? */
1853 if ((inst & 0xffe00000) == 0x37a00000
1854 || (inst & 0xffffffe0) == 0x081d0240)
1855 {
1856 pc += 4;
1857 continue;
1858 }
1859
1860 /* Account for general and floating-point register saves. */
1861 reg_num = inst_saves_gr (inst);
1862 save_gr &= ~(1 << reg_num);
1863
1864 /* Ugh. Also account for argument stores into the stack.
1865 Unfortunately args_stored only tells us that some arguments
1866 were stored into the stack. Not how many or what kind!
1867
1868 This is a kludge, as only the HP compiler sets this bit, and it
1869 never does prologue scheduling. So once we see one, skip past
1870 all of them. We have similar code for the fp arg stores below.
1871
1872 FIXME. Can still die if we have a mix of GR and FR argument
1873 stores! */
1874 if (reg_num >= (TARGET_PTR_BIT == 64 ? 19 : 23) && reg_num <= 26)
1875 {
1876 while (reg_num >= (TARGET_PTR_BIT == 64 ? 19 : 23) && reg_num <= 26)
1877 {
1878 pc += 4;
1879 status = target_read_memory (pc, buf, 4);
1880 inst = extract_unsigned_integer (buf, 4);
1881 if (status != 0)
1882 return pc;
1883 reg_num = inst_saves_gr (inst);
1884 }
1885 args_stored = 0;
1886 continue;
1887 }
1888
1889 reg_num = inst_saves_fr (inst);
1890 save_fr &= ~(1 << reg_num);
1891
1892 status = target_read_memory (pc + 4, buf, 4);
1893 next_inst = extract_unsigned_integer (buf, 4);
1894
1895 /* Yow! */
1896 if (status != 0)
1897 return pc;
1898
1899 /* We've got to be ready to handle the ldo before the fp register
1900 save. */
1901 if ((inst & 0xfc000000) == 0x34000000
1902 && inst_saves_fr (next_inst) >= 4
1903 && inst_saves_fr (next_inst) <= (TARGET_PTR_BIT == 64 ? 11 : 7))
1904 {
1905 /* So we drop into the code below in a reasonable state. */
1906 reg_num = inst_saves_fr (next_inst);
1907 pc -= 4;
1908 }
1909
1910 /* Ugh. Also account for argument stores into the stack.
1911 This is a kludge, as only the HP compiler sets this bit, and it
1912 never does prologue scheduling. So once we see one, skip past
1913 all of them. */
1914 if (reg_num >= 4 && reg_num <= (TARGET_PTR_BIT == 64 ? 11 : 7))
1915 {
1916 while (reg_num >= 4 && reg_num <= (TARGET_PTR_BIT == 64 ? 11 : 7))
1917 {
1918 pc += 8;
1919 status = target_read_memory (pc, buf, 4);
1920 inst = extract_unsigned_integer (buf, 4);
1921 if (status != 0)
1922 return pc;
1923 if ((inst & 0xfc000000) != 0x34000000)
1924 break;
1925 status = target_read_memory (pc + 4, buf, 4);
1926 next_inst = extract_unsigned_integer (buf, 4);
1927 if (status != 0)
1928 return pc;
1929 reg_num = inst_saves_fr (next_inst);
1930 }
1931 args_stored = 0;
1932 continue;
1933 }
1934
1935 /* Quit if we hit any kind of branch. This can happen if a prologue
1936 instruction is in the delay slot of the first call/branch. */
1937 if (is_branch (inst))
1938 break;
1939
1940 /* What a crock. The HP compilers set args_stored even if no
1941 arguments were stored into the stack (boo hiss). This could
1942 cause this code to then skip a bunch of user insns (up to the
1943 first branch).
1944
1945 To combat this we try to identify when args_stored was bogusly
1946 set and clear it. We only do this when args_stored is nonzero,
1947 all other resources are accounted for, and nothing changed on
1948 this pass. */
1949 if (args_stored
1950 && !(save_gr || save_fr || save_rp || save_sp || stack_remaining > 0)
1951 && old_save_gr == save_gr && old_save_fr == save_fr
1952 && old_save_rp == save_rp && old_save_sp == save_sp
1953 && old_stack_remaining == stack_remaining)
1954 break;
1955
1956 /* Bump the PC. */
1957 pc += 4;
1958 }
1959
1960 /* We've got a tentative location for the end of the prologue. However,
1961 because of limitations in the unwind descriptor mechanism we may
1962 have gone too far into user code looking for the save of a register
1963 that does not exist. So, if there are registers we expected to be saved
1964 but never were, mask them out and restart.
1965
1966 This should only happen in optimized code, and should be very rare. */
1967 if (save_gr || (save_fr && !(restart_fr || restart_gr)))
1968 {
1969 pc = orig_pc;
1970 restart_gr = save_gr;
1971 restart_fr = save_fr;
1972 goto restart;
1973 }
1974
1975 return pc;
1976 }
1977
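/* A minimal sketch of the bitmask setup used above (and again in
   hppa_frame_cache): the unwind descriptor records only counts, with
   GR saves starting at gr3 and FR saves starting at fr12.  The values
   Entry_GR = 4 and Entry_FR = 2 are hypothetical, chosen only to show
   what the loops produce when the frame pointer is not involved.  */

static void
example_entry_masks (void)
{
  unsigned long save_gr = 0, save_fr = 0;
  int entry_gr = 4, entry_fr = 2, i;

  for (i = 3; i < entry_gr + 3; i++)		/* gr3 .. gr6 */
    save_gr |= (1 << i);
  for (i = 12; i < entry_fr + 12; i++)		/* fr12 .. fr13 */
    save_fr |= (1 << i);

  gdb_assert (save_gr == 0x78);			/* bits 3-6 set */
  gdb_assert (save_fr == 0x3000);		/* bits 12-13 set */
}
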
1978
1979 /* Return the address of the PC after the last prologue instruction if
1980 we can determine it from the debug symbols. Else return zero. */
1981
1982 static CORE_ADDR
1983 after_prologue (CORE_ADDR pc)
1984 {
1985 struct symtab_and_line sal;
1986 CORE_ADDR func_addr, func_end;
1987 struct symbol *f;
1988
1989 /* If we can not find the symbol in the partial symbol table, then
1990 there is no hope we can determine the function's start address
1991 with this code. */
1992 if (!find_pc_partial_function (pc, NULL, &func_addr, &func_end))
1993 return 0;
1994
1995 /* Get the line associated with FUNC_ADDR. */
1996 sal = find_pc_line (func_addr, 0);
1997
1998 /* There are only two cases to consider. First, the end of the source line
1999 is within the function bounds. In that case we return the end of the
2000 source line. Second, the end of the source line extends beyond the
2001 bounds of the current function. We need to use the slow code to
2002 examine instructions in that case.
2003
2004 Anything else is simply a bug elsewhere. Fixing it here is absolutely
2005 the wrong thing to do. In fact, it should be entirely possible for this
2006 function to always return zero since the slow instruction scanning code
2007 is supposed to *always* work. If it does not, then it is a bug. */
2008 if (sal.end < func_end)
2009 return sal.end;
2010 else
2011 return 0;
2012 }
2013
2014 /* To skip prologues, I use this predicate. Returns either PC itself
2015 if the code at PC does not look like a function prologue; otherwise
2016 returns an address that (if we're lucky) follows the prologue. If
2017 LENIENT, then we must skip everything which is involved in setting
2018 up the frame (it's OK to skip more, just so long as we don't skip
2019 anything which might clobber the registers which are being saved).
2020 Currently we must not skip more on the alpha, but we might implement
2021 the lenient stuff some day. */
2022
2023 static CORE_ADDR
2024 hppa_skip_prologue (CORE_ADDR pc)
2025 {
2026 unsigned long inst;
2027 int offset;
2028 CORE_ADDR post_prologue_pc;
2029 char buf[4];
2030
2031 /* See if we can determine the end of the prologue via the symbol table.
2032 If so, then return either PC, or the PC after the prologue, whichever
2033 is greater. */
2034
2035 post_prologue_pc = after_prologue (pc);
2036
2037 /* If after_prologue returned a useful address, then use it. Else
2038 fall back on the instruction skipping code.
2039
2040 Some folks have claimed this causes problems because the breakpoint
2041 may be the first instruction of the prologue. If that happens, then
2042 the instruction skipping code has a bug that needs to be fixed. */
2043 if (post_prologue_pc != 0)
2044 return max (pc, post_prologue_pc);
2045 else
2046 return (skip_prologue_hard_way (pc));
2047 }
2048
2049 struct hppa_frame_cache
2050 {
2051 CORE_ADDR base;
2052 struct trad_frame_saved_reg *saved_regs;
2053 };
2054
2055 static struct hppa_frame_cache *
2056 hppa_frame_cache (struct frame_info *next_frame, void **this_cache)
2057 {
2058 struct hppa_frame_cache *cache;
2059 long saved_gr_mask;
2060 long saved_fr_mask;
2061 CORE_ADDR this_sp;
2062 long frame_size;
2063 struct unwind_table_entry *u;
2064 int i;
2065
2066 if (hppa_debug)
2067 fprintf_unfiltered (gdb_stdlog, "{ hppa_frame_cache (frame=%d) -> ",
2068 frame_relative_level(next_frame));
2069
2070 if ((*this_cache) != NULL)
2071 {
2072 if (hppa_debug)
2073 fprintf_unfiltered (gdb_stdlog, "base=0x%s (cached) }",
2074 paddr_nz (((struct hppa_frame_cache *)*this_cache)->base));
2075 return (*this_cache);
2076 }
2077 cache = FRAME_OBSTACK_ZALLOC (struct hppa_frame_cache);
2078 (*this_cache) = cache;
2079 cache->saved_regs = trad_frame_alloc_saved_regs (next_frame);
2080
2081 /* Yow! */
2082 u = find_unwind_entry (frame_func_unwind (next_frame));
2083 if (!u)
2084 {
2085 if (hppa_debug)
2086 fprintf_unfiltered (gdb_stdlog, "base=NULL (no unwind entry) }");
2087 return (*this_cache);
2088 }
2089
2090 /* Turn the Entry_GR field into a bitmask. */
2091 saved_gr_mask = 0;
2092 for (i = 3; i < u->Entry_GR + 3; i++)
2093 {
2094 /* Frame pointer gets saved into a special location. */
2095 if (u->Save_SP && i == HPPA_FP_REGNUM)
2096 continue;
2097
2098 saved_gr_mask |= (1 << i);
2099 }
2100
2101 /* Turn the Entry_FR field into a bitmask too. */
2102 saved_fr_mask = 0;
2103 for (i = 12; i < u->Entry_FR + 12; i++)
2104 saved_fr_mask |= (1 << i);
2105
2106 /* Loop until we find everything of interest or hit a branch.
2107
2108 For unoptimized GCC code and for any HP CC code this will never ever
2109 examine any user instructions.
2110
2111 For optimized GCC code we're faced with problems. GCC will schedule
2112 its prologue and make prologue instructions available for delay slot
2113 filling. The end result is user code gets mixed in with the prologue
2114 and a prologue instruction may be in the delay slot of the first branch
2115 or call.
2116
2117 Some unexpected things are expected with debugging optimized code, so
2118 we allow this routine to walk past user instructions in optimized
2119 GCC code. */
2120 {
2121 int final_iteration = 0;
2122 CORE_ADDR pc;
2123 CORE_ADDR end_pc;
2124 int looking_for_sp = u->Save_SP;
2125 int looking_for_rp = u->Save_RP;
2126 int fp_loc = -1;
2127 end_pc = skip_prologue_using_sal (frame_func_unwind (next_frame));
2128 if (end_pc == 0)
2129 end_pc = frame_pc_unwind (next_frame);
2130 frame_size = 0;
2131 for (pc = frame_func_unwind (next_frame);
2132 ((saved_gr_mask || saved_fr_mask
2133 || looking_for_sp || looking_for_rp
2134 || frame_size < (u->Total_frame_size << 3))
2135 && pc <= end_pc);
2136 pc += 4)
2137 {
2138 int reg;
2139 char buf4[4];
2140 long status = target_read_memory (pc, buf4, sizeof buf4);
2141 long inst = extract_unsigned_integer (buf4, sizeof buf4);
2142
2143 /* Note the interesting effects of this instruction. */
2144 frame_size += prologue_inst_adjust_sp (inst);
2145
2146 /* There are limited ways to store the return pointer into the
2147 stack. */
2148 if (inst == 0x6bc23fd9) /* stw rp,-0x14(sr0,sp) */
2149 {
2150 looking_for_rp = 0;
2151 cache->saved_regs[RP_REGNUM].addr = -20;
2152 }
2153 else if (inst == 0x0fc212c1) /* std rp,-0x10(sr0,sp) */
2154 {
2155 looking_for_rp = 0;
2156 cache->saved_regs[RP_REGNUM].addr = -16;
2157 }
2158
2159 /* Check to see if we saved SP into the stack. This also
2160 happens to indicate the location of the saved frame
2161 pointer. */
2162 if ((inst & 0xffffc000) == 0x6fc10000 /* stw,ma r1,N(sr0,sp) */
2163 || (inst & 0xffffc00c) == 0x73c10008) /* std,ma r1,N(sr0,sp) */
2164 {
2165 looking_for_sp = 0;
2166 cache->saved_regs[HPPA_FP_REGNUM].addr = 0;
2167 }
2168
2169 /* Account for general and floating-point register saves. */
2170 reg = inst_saves_gr (inst);
2171 if (reg >= 3 && reg <= 18
2172 && (!u->Save_SP || reg != HPPA_FP_REGNUM))
2173 {
2174 saved_gr_mask &= ~(1 << reg);
2175 if ((inst >> 26) == 0x1b && extract_14 (inst) >= 0)
2176 /* stwm with a positive displacement is a _post_
2177 _modify_. */
2178 cache->saved_regs[reg].addr = 0;
2179 else if ((inst & 0xfc00000c) == 0x70000008)
2180 /* A std has explicit post_modify forms. */
2181 cache->saved_regs[reg].addr = 0;
2182 else
2183 {
2184 CORE_ADDR offset;
2185
2186 if ((inst >> 26) == 0x1c)
2187 offset = (inst & 0x1 ? -1 << 13 : 0) | (((inst >> 4) & 0x3ff) << 3);
2188 else if ((inst >> 26) == 0x03)
2189 offset = low_sign_extend (inst & 0x1f, 5);
2190 else
2191 offset = extract_14 (inst);
2192
2193 /* Handle code with and without frame pointers. */
2194 if (u->Save_SP)
2195 cache->saved_regs[reg].addr = offset;
2196 else
2197 cache->saved_regs[reg].addr = (u->Total_frame_size << 3) + offset;
2198 }
2199 }
2200
2201 /* GCC handles callee saved FP regs a little differently.
2202
2203 It emits an instruction to put the value of the start of
2204 the FP store area into %r1. It then uses fstds,ma with a
2205 basereg of %r1 for the stores.
2206
2207 HP CC emits them at the current stack pointer modifying the
2208 stack pointer as it stores each register. */
2209
2210 /* ldo X(%r3),%r1 or ldo X(%r30),%r1. */
2211 if ((inst & 0xffffc000) == 0x34610000
2212 || (inst & 0xffffc000) == 0x37c10000)
2213 fp_loc = extract_14 (inst);
2214
2215 reg = inst_saves_fr (inst);
2216 if (reg >= 12 && reg <= 21)
2217 {
2218 /* Note +4 braindamage below is necessary because the FP
2219 status registers are internally 8 registers rather than
2220 the expected 4 registers. */
2221 saved_fr_mask &= ~(1 << reg);
2222 if (fp_loc == -1)
2223 {
2224 /* 1st HP CC FP register store. After this
2225 instruction we've set enough state that the GCC and
2226 HPCC code are both handled in the same manner. */
2227 cache->saved_regs[reg + FP4_REGNUM + 4].addr = 0;
2228 fp_loc = 8;
2229 }
2230 else
2231 {
2232 cache->saved_regs[reg + HPPA_FP0_REGNUM + 4].addr = fp_loc;
2233 fp_loc += 8;
2234 }
2235 }
2236
2237 /* Quit if we hit any kind of branch the previous iteration. */
2238 if (final_iteration)
2239 break;
2240 /* We want to look precisely one instruction beyond the branch
2241 if we have not found everything yet. */
2242 if (is_branch (inst))
2243 final_iteration = 1;
2244 }
2245 }
2246
2247 {
2248 /* The frame base always represents the value of %sp at entry to
2249 the current function (and is thus equivalent to the "saved"
2250 stack pointer). */
2251 CORE_ADDR this_sp = frame_unwind_register_unsigned (next_frame, HPPA_SP_REGNUM);
2252 /* FIXME: cagney/2004-02-22: This assumes that the frame has been
2253 created. If it hasn't, everything will be out of whack. */
2254 if (u->Save_SP && trad_frame_addr_p (cache->saved_regs, HPPA_SP_REGNUM))
2255 /* Both conditions hold: we expect the SP to be saved, and it has
2256 in fact been saved. The entry SP value is saved at this frame's SP
2257 address. */
2258 cache->base = read_memory_integer (this_sp, TARGET_PTR_BIT / 8);
2259 else
2260 /* The prologue has been slowly allocating stack space. Adjust
2261 the SP back. */
2262 cache->base = this_sp - frame_size;
2263 trad_frame_set_value (cache->saved_regs, HPPA_SP_REGNUM, cache->base);
2264 }
2265
2266 /* The PC is found in the "return register"; "Millicode" uses "r31"
2267 as the return register while normal code uses "rp". */
2268 if (u->Millicode)
2269 cache->saved_regs[PCOQ_HEAD_REGNUM] = cache->saved_regs[31];
2270 else
2271 cache->saved_regs[PCOQ_HEAD_REGNUM] = cache->saved_regs[RP_REGNUM];
2272
2273 {
2274 /* Convert all the offsets into addresses. */
2275 int reg;
2276 for (reg = 0; reg < NUM_REGS; reg++)
2277 {
2278 if (trad_frame_addr_p (cache->saved_regs, reg))
2279 cache->saved_regs[reg].addr += cache->base;
2280 }
2281 }
2282
2283 if (hppa_debug)
2284 fprintf_unfiltered (gdb_stdlog, "base=0x%s }",
2285 paddr_nz (((struct hppa_frame_cache *)*this_cache)->base));
2286 return (*this_cache);
2287 }
2288
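/* A minimal sketch of how hppa_frame_cache turns the offsets recorded
   during the prologue scan into absolute addresses: each saved_regs
   entry holds an offset from the entry SP (the frame base) until the
   final loop adds cache->base.  The base, frame size and displacement
   below are hypothetical values.  */

static void
example_rebase_saved_reg (void)
{
  CORE_ADDR base = 0x7b03a000;		/* hypothetical entry SP */
  long frame_bytes = 0x40;		/* hypothetical Total_frame_size << 3 */
  long offset = -20;			/* hypothetical stw displacement */

  /* With Save_SP set, the recorded offset is used as is.  */
  CORE_ADDR with_fp = base + offset;

  /* Without Save_SP, the frame size is folded in before rebasing,
     as the GR-save case above does.  */
  CORE_ADDR without_fp = base + frame_bytes + offset;

  gdb_assert (with_fp == 0x7b039fec);
  gdb_assert (without_fp == 0x7b03a02c);
}
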
2289 static void
2290 hppa_frame_this_id (struct frame_info *next_frame, void **this_cache,
2291 struct frame_id *this_id)
2292 {
2293 struct hppa_frame_cache *info = hppa_frame_cache (next_frame, this_cache);
2294 (*this_id) = frame_id_build (info->base, frame_func_unwind (next_frame));
2295 }
2296
2297 static void
2298 hppa_frame_prev_register (struct frame_info *next_frame,
2299 void **this_cache,
2300 int regnum, int *optimizedp,
2301 enum lval_type *lvalp, CORE_ADDR *addrp,
2302 int *realnump, void *valuep)
2303 {
2304 struct hppa_frame_cache *info = hppa_frame_cache (next_frame, this_cache);
2305 struct gdbarch *gdbarch = get_frame_arch (next_frame);
2306 if (regnum == PCOQ_TAIL_REGNUM)
2307 {
2308 /* The PCOQ TAIL, or NPC, needs to be computed from the unwound
2309 PC register. */
2310 *optimizedp = 0;
2311 *lvalp = not_lval;
2312 *addrp = 0;
2313 *realnump = 0;
2314 if (valuep)
2315 {
2316 int regsize = register_size (gdbarch, PCOQ_HEAD_REGNUM);
2317 CORE_ADDR pc;
2318 int optimized;
2319 enum lval_type lval;
2320 CORE_ADDR addr;
2321 int realnum;
2322 bfd_byte value[MAX_REGISTER_SIZE];
2323 trad_frame_prev_register (next_frame, info->saved_regs,
2324 PCOQ_HEAD_REGNUM, &optimized, &lval, &addr,
2325 &realnum, &value);
2326 pc = extract_unsigned_integer (&value, regsize);
2327 store_unsigned_integer (valuep, regsize, pc + 4);
2328 }
2329 }
2330 else
2331 {
2332 trad_frame_prev_register (next_frame, info->saved_regs, regnum,
2333 optimizedp, lvalp, addrp, realnump, valuep);
2334 }
2335 }
2336
2337 static const struct frame_unwind hppa_frame_unwind =
2338 {
2339 NORMAL_FRAME,
2340 hppa_frame_this_id,
2341 hppa_frame_prev_register
2342 };
2343
2344 static const struct frame_unwind *
2345 hppa_frame_unwind_sniffer (struct frame_info *next_frame)
2346 {
2347 return &hppa_frame_unwind;
2348 }
2349
2350 static CORE_ADDR
2351 hppa_frame_base_address (struct frame_info *next_frame,
2352 void **this_cache)
2353 {
2354 struct hppa_frame_cache *info = hppa_frame_cache (next_frame,
2355 this_cache);
2356 return info->base;
2357 }
2358
2359 static const struct frame_base hppa_frame_base = {
2360 &hppa_frame_unwind,
2361 hppa_frame_base_address,
2362 hppa_frame_base_address,
2363 hppa_frame_base_address
2364 };
2365
2366 static const struct frame_base *
2367 hppa_frame_base_sniffer (struct frame_info *next_frame)
2368 {
2369 return &hppa_frame_base;
2370 }
2371
2372 static struct frame_id
2373 hppa_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
2374 {
2375 return frame_id_build (frame_unwind_register_unsigned (next_frame,
2376 HPPA_SP_REGNUM),
2377 frame_pc_unwind (next_frame));
2378 }
2379
2380 static CORE_ADDR
2381 hppa_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
2382 {
2383 return frame_unwind_register_signed (next_frame, PCOQ_HEAD_REGNUM) & ~3;
2384 }
2385
2386 /* Instead of this nasty cast, add a method pvoid() that prints out a
2387 host VOID data type (remember %p isn't portable). */
2388
2389 static CORE_ADDR
2390 hppa_pointer_to_address_hack (void *ptr)
2391 {
2392 gdb_assert (sizeof (ptr) == TYPE_LENGTH (builtin_type_void_data_ptr));
2393 return POINTER_TO_ADDRESS (builtin_type_void_data_ptr, &ptr);
2394 }
2395
2396 static void
2397 unwind_command (char *exp, int from_tty)
2398 {
2399 CORE_ADDR address;
2400 struct unwind_table_entry *u;
2401
2402 /* If we have an expression, evaluate it and use it as the address. */
2403
2404 if (exp != 0 && *exp != 0)
2405 address = parse_and_eval_address (exp);
2406 else
2407 return;
2408
2409 u = find_unwind_entry (address);
2410
2411 if (!u)
2412 {
2413 printf_unfiltered ("Can't find unwind table entry for %s\n", exp);
2414 return;
2415 }
2416
2417 printf_unfiltered ("unwind_table_entry (0x%s):\n",
2418 paddr_nz (hppa_pointer_to_address_hack (u)));
2419
2420 printf_unfiltered ("\tregion_start = ");
2421 print_address (u->region_start, gdb_stdout);
2422
2423 printf_unfiltered ("\n\tregion_end = ");
2424 print_address (u->region_end, gdb_stdout);
2425
2426 #define pif(FLD) if (u->FLD) printf_unfiltered (" "#FLD);
2427
2428 printf_unfiltered ("\n\tflags =");
2429 pif (Cannot_unwind);
2430 pif (Millicode);
2431 pif (Millicode_save_sr0);
2432 pif (Entry_SR);
2433 pif (Args_stored);
2434 pif (Variable_Frame);
2435 pif (Separate_Package_Body);
2436 pif (Frame_Extension_Millicode);
2437 pif (Stack_Overflow_Check);
2438 pif (Two_Instruction_SP_Increment);
2439 pif (Ada_Region);
2440 pif (Save_SP);
2441 pif (Save_RP);
2442 pif (Save_MRP_in_frame);
2443 pif (extn_ptr_defined);
2444 pif (Cleanup_defined);
2445 pif (MPE_XL_interrupt_marker);
2446 pif (HP_UX_interrupt_marker);
2447 pif (Large_frame);
2448
2449 putchar_unfiltered ('\n');
2450
2451 #define pin(FLD) printf_unfiltered ("\t"#FLD" = 0x%x\n", u->FLD);
2452
2453 pin (Region_description);
2454 pin (Entry_FR);
2455 pin (Entry_GR);
2456 pin (Total_frame_size);
2457 }
2458
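/* A minimal sketch of what one pif/pin use above looks like once the
   preprocessor is done with it; U is a hypothetical unwind entry, and
   Save_SP and Entry_GR are fields the command actually prints.  */

static void
example_pif_pin_expansion (struct unwind_table_entry *u)
{
  /* pif (Save_SP); expands to:  */
  if (u->Save_SP)
    printf_unfiltered (" " "Save_SP");

  /* pin (Entry_GR); expands to:  */
  printf_unfiltered ("\t" "Entry_GR" " = 0x%x\n", u->Entry_GR);
}
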
2459 void
2460 hppa_skip_permanent_breakpoint (void)
2461 {
2462 /* To step over a breakpoint instruction on the PA takes some
2463 fiddling with the instruction address queue.
2464
2465 When we stop at a breakpoint, the IA queue front (the instruction
2466 we're executing now) points at the breakpoint instruction, and
2467 the IA queue back (the next instruction to execute) points to
2468 whatever instruction we would execute after the breakpoint, if it
2469 were an ordinary instruction. This is the case even if the
2470 breakpoint is in the delay slot of a branch instruction.
2471
2472 Clearly, to step past the breakpoint, we need to set the queue
2473 front to the back. But what do we put in the back? What
2474 instruction comes after that one? Because of the branch delay
2475 slot, the next insn is always at the back + 4. */
2476 write_register (PCOQ_HEAD_REGNUM, read_register (PCOQ_TAIL_REGNUM));
2477 write_register (PCSQ_HEAD_REGNUM, read_register (PCSQ_TAIL_REGNUM));
2478
2479 write_register (PCOQ_TAIL_REGNUM, read_register (PCOQ_TAIL_REGNUM) + 4);
2480 /* We can leave the tail's space the same, since there's no jump. */
2481 }
2482
2483 int
2484 hppa_pc_requires_run_before_use (CORE_ADDR pc)
2485 {
2486 /* Sometimes we may pluck out a minimal symbol that has a negative address.
2487
2488 An example of this occurs when an a.out is linked against a foo.sl.
2489 The foo.sl defines a global bar(), and the a.out declares a signature
2490 for bar(). However, the a.out doesn't directly call bar(), but passes
2491 its address in another call.
2492
2493 If you have this scenario and attempt to "break bar" before running,
2494 gdb will find a minimal symbol for bar() in the a.out. But that
2495 symbol's address will be negative. What this appears to denote is
2496 an index backwards from the base of the procedure linkage table (PLT)
2497 into the data linkage table (DLT), the end of which is contiguous
2498 with the start of the PLT. This is clearly not a valid address for
2499 us to set a breakpoint on.
2500
2501 Note that one must be careful in how one checks for a negative address.
2502 0xc0000000 is a legitimate address of something in a shared text
2503 segment, for example. Since I don't know what the possible range
2504 is of these "really, truly negative" addresses that come from the
2505 minimal symbols, I'm resorting to the gross hack of checking the
2506 top byte of the address for all 1's. Sigh. */
2507
2508 return (!target_has_stack && (pc & 0xFF000000));
2509 }
2510
2511 int
2512 hppa_instruction_nullified (void)
2513 {
2514 /* brobecker 2002/11/07: Couldn't we use a ULONGEST here? It would
2515 avoid the type cast. I'm leaving it as is for now as I'm doing
2516 semi-mechanical multiarching-related changes. */
2517 const int ipsw = (int) read_register (IPSW_REGNUM);
2518 const int flags = (int) read_register (FLAGS_REGNUM);
2519
2520 return ((ipsw & 0x00200000) && !(flags & 0x2));
2521 }
2522
2523 /* Return the GDB type object for the "standard" data type of data
2524 in register N. */
2525
2526 static struct type *
2527 hppa32_register_type (struct gdbarch *gdbarch, int reg_nr)
2528 {
2529 if (reg_nr < FP4_REGNUM)
2530 return builtin_type_uint32;
2531 else
2532 return builtin_type_ieee_single_big;
2533 }
2534
2535 /* Return the GDB type object for the "standard" data type of data
2536 in register N. hppa64 version. */
2537
2538 static struct type *
2539 hppa64_register_type (struct gdbarch *gdbarch, int reg_nr)
2540 {
2541 if (reg_nr < FP4_REGNUM)
2542 return builtin_type_uint64;
2543 else
2544 return builtin_type_ieee_double_big;
2545 }
2546
2547 /* Return True if REGNUM is not a register available to the user
2548 through ptrace(). */
2549
2550 static int
2551 hppa_cannot_store_register (int regnum)
2552 {
2553 return (regnum == 0
2554 || regnum == PCSQ_HEAD_REGNUM
2555 || (regnum >= PCSQ_TAIL_REGNUM && regnum < IPSW_REGNUM)
2556 || (regnum > IPSW_REGNUM && regnum < FP4_REGNUM));
2557
2558 }
2559
2560 static CORE_ADDR
2561 hppa_smash_text_address (CORE_ADDR addr)
2562 {
2563 /* The low two bits of the PC on the PA contain the privilege level.
2564 Some genius implementing a (non-GCC) compiler apparently decided
2565 this means that "addresses" in a text section therefore include a
2566 privilege level, and thus symbol tables should contain these bits.
2567 This seems like a bonehead thing to do--anyway, it seems to work
2568 for our purposes to just ignore those bits. */
2569
2570 return (addr &= ~0x3);
2571 }
2572
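/* A minimal sketch of the masking above: the low two bits of a PA text
   address carry the privilege level, and hppa_smash_text_address (like
   hppa_unwind_pc) simply drops them.  0xa5b3 is a hypothetical
   symbol-table value used only for illustration.  */

static void
example_smash_text_address (void)
{
  CORE_ADDR from_symtab = 0xa5b3;	/* privilege level 3 in the low bits */

  gdb_assert (hppa_smash_text_address (from_symtab) == 0xa5b0);
}
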
2573 /* Get the ith function argument for the current function. */
2574 CORE_ADDR
2575 hppa_fetch_pointer_argument (struct frame_info *frame, int argi,
2576 struct type *type)
2577 {
2578 CORE_ADDR addr;
2579 get_frame_register (frame, R0_REGNUM + 26 - argi, &addr);
2580 return addr;
2581 }
2582
2583 static void
2584 hppa_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
2585 int regnum, void *buf)
2586 {
2587 ULONGEST tmp;
2588
2589 regcache_raw_read_unsigned (regcache, regnum, &tmp);
2590 if (regnum == PCOQ_HEAD_REGNUM || regnum == PCOQ_TAIL_REGNUM)
2591 tmp &= ~0x3;
2592 store_unsigned_integer (buf, sizeof(tmp), tmp);
2593 }
2594
2595 /* Here is a table of C type sizes on hppa with various compilers
2596 and options. I measured this on PA 9000/800 with HP-UX 11.11
2597 and these compilers:
2598
2599 /usr/ccs/bin/cc HP92453-01 A.11.01.21
2600 /opt/ansic/bin/cc HP92453-01 B.11.11.28706.GP
2601 /opt/aCC/bin/aCC B3910B A.03.45
2602 gcc gcc 3.3.2 native hppa2.0w-hp-hpux11.11
2603
2604 cc : 1 2 4 4 8 : 4 8 -- : 4 4
2605 ansic +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4
2606 ansic +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4
2607 ansic +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8
2608 acc +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4
2609 acc +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4
2610 acc +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8
2611 gcc : 1 2 4 4 8 : 4 8 16 : 4 4
2612
2613 Each line is:
2614
2615 compiler and options
2616 char, short, int, long, long long
2617 float, double, long double
2618 char *, void (*)()
2619
2620 So all these compilers use either ILP32 or LP64 model.
2621 TODO: gcc has more options so it needs more investigation.
2622
2623 For floating point types, see:
2624
2625 http://docs.hp.com/hpux/pdf/B3906-90006.pdf
2626 HP-UX floating-point guide, hpux 11.00
2627
2628 -- chastain 2003-12-18 */
2629
2630 static struct gdbarch *
2631 hppa_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2632 {
2633 struct gdbarch_tdep *tdep;
2634 struct gdbarch *gdbarch;
2635
2636 /* Try to determine the ABI of the object we are loading. */
2637 if (info.abfd != NULL && info.osabi == GDB_OSABI_UNKNOWN)
2638 {
2639 /* If it's a SOM file, assume it's HP/UX SOM. */
2640 if (bfd_get_flavour (info.abfd) == bfd_target_som_flavour)
2641 info.osabi = GDB_OSABI_HPUX_SOM;
2642 }
2643
2644 /* Find a candidate among the list of pre-declared architectures. */
2645 arches = gdbarch_list_lookup_by_info (arches, &info);
2646 if (arches != NULL)
2647 return (arches->gdbarch);
2648
2649 /* If none found, then allocate and initialize one. */
2650 tdep = XZALLOC (struct gdbarch_tdep);
2651 gdbarch = gdbarch_alloc (&info, tdep);
2652
2653 /* Determine from the bfd_arch_info structure if we are dealing with
2654 a 32- or 64-bit architecture. If the bfd_arch_info is not available,
2655 then default to a 32-bit machine. */
2656 if (info.bfd_arch_info != NULL)
2657 tdep->bytes_per_address =
2658 info.bfd_arch_info->bits_per_address / info.bfd_arch_info->bits_per_byte;
2659 else
2660 tdep->bytes_per_address = 4;
2661
2662 /* Some parts of the gdbarch vector depend on whether we are running
2663 on a 32-bit or a 64-bit target. */
2664 switch (tdep->bytes_per_address)
2665 {
2666 case 4:
2667 set_gdbarch_num_regs (gdbarch, hppa32_num_regs);
2668 set_gdbarch_register_name (gdbarch, hppa32_register_name);
2669 set_gdbarch_register_type (gdbarch, hppa32_register_type);
2670 break;
2671 case 8:
2672 set_gdbarch_num_regs (gdbarch, hppa64_num_regs);
2673 set_gdbarch_register_name (gdbarch, hppa64_register_name);
2674 set_gdbarch_register_type (gdbarch, hppa64_register_type);
2675 break;
2676 default:
2677 internal_error (__FILE__, __LINE__, "Unsupported address size: %d",
2678 tdep->bytes_per_address);
2679 }
2680
2681 set_gdbarch_long_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT);
2682 set_gdbarch_ptr_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT);
2683
2684 /* The following gdbarch vector elements are the same in both ILP32
2685 and LP64, but might show differences some day. */
2686 set_gdbarch_long_long_bit (gdbarch, 64);
2687 set_gdbarch_long_double_bit (gdbarch, 128);
2688 set_gdbarch_long_double_format (gdbarch, &floatformat_ia64_quad_big);
2689
2690 /* The following gdbarch vector elements do not depend on the address
2691 size, or on any other gdbarch element previously set. */
2692 set_gdbarch_skip_prologue (gdbarch, hppa_skip_prologue);
2693 set_gdbarch_skip_trampoline_code (gdbarch, hppa_skip_trampoline_code);
2694 set_gdbarch_in_solib_call_trampoline (gdbarch, hppa_in_solib_call_trampoline);
2695 set_gdbarch_in_solib_return_trampoline (gdbarch,
2696 hppa_in_solib_return_trampoline);
2697 set_gdbarch_inner_than (gdbarch, core_addr_greaterthan);
2698 set_gdbarch_sp_regnum (gdbarch, HPPA_SP_REGNUM);
2699 set_gdbarch_fp0_regnum (gdbarch, HPPA_FP0_REGNUM);
2700 set_gdbarch_cannot_store_register (gdbarch, hppa_cannot_store_register);
2701 set_gdbarch_addr_bits_remove (gdbarch, hppa_smash_text_address);
2702 set_gdbarch_smash_text_address (gdbarch, hppa_smash_text_address);
2703 set_gdbarch_believe_pcc_promotion (gdbarch, 1);
2704 set_gdbarch_read_pc (gdbarch, hppa_target_read_pc);
2705 set_gdbarch_write_pc (gdbarch, hppa_target_write_pc);
2706
2707 /* Helper for function argument information. */
2708 set_gdbarch_fetch_pointer_argument (gdbarch, hppa_fetch_pointer_argument);
2709
2710 set_gdbarch_print_insn (gdbarch, print_insn_hppa);
2711
2712 /* When a hardware watchpoint triggers, we'll move the inferior past
2713 it by removing all eventpoints; stepping past the instruction
2714 that caused the trigger; reinserting eventpoints; and checking
2715 whether any watched location changed. */
2716 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2717
2718 /* Inferior function call methods. */
2719 switch (tdep->bytes_per_address)
2720 {
2721 case 4:
2722 set_gdbarch_push_dummy_call (gdbarch, hppa32_push_dummy_call);
2723 set_gdbarch_frame_align (gdbarch, hppa32_frame_align);
2724 break;
2725 case 8:
2726 set_gdbarch_push_dummy_call (gdbarch, hppa64_push_dummy_call);
2727 set_gdbarch_frame_align (gdbarch, hppa64_frame_align);
2728 break;
2729 default:
2730 internal_error (__FILE__, __LINE__, "bad switch");
2731 }
2732
2733 /* Struct return methods. */
2734 switch (tdep->bytes_per_address)
2735 {
2736 case 4:
2737 set_gdbarch_return_value (gdbarch, hppa32_return_value);
2738 break;
2739 case 8:
2740 set_gdbarch_return_value (gdbarch, hppa64_return_value);
2741 break;
2742 default:
2743 internal_error (__FILE__, __LINE__, "bad switch");
2744 }
2745
2746 set_gdbarch_breakpoint_from_pc (gdbarch, hppa_breakpoint_from_pc);
2747
2748 /* Frame unwind methods. */
2749 set_gdbarch_unwind_dummy_id (gdbarch, hppa_unwind_dummy_id);
2750 set_gdbarch_unwind_pc (gdbarch, hppa_unwind_pc);
2751 frame_unwind_append_sniffer (gdbarch, hppa_frame_unwind_sniffer);
2752 frame_base_append_sniffer (gdbarch, hppa_frame_base_sniffer);
2753
2754 set_gdbarch_pseudo_register_read (gdbarch, hppa_pseudo_register_read);
2755
2756 /* Hook in ABI-specific overrides, if they have been registered. */
2757 gdbarch_init_osabi (info, gdbarch);
2758
2759 return gdbarch;
2760 }
2761
2762 static void
2763 hppa_dump_tdep (struct gdbarch *current_gdbarch, struct ui_file *file)
2764 {
2765 struct gdbarch_tdep *tdep = gdbarch_tdep (current_gdbarch);
2766
2767 fprintf_unfiltered (file, "bytes_per_address = %d\n",
2768 tdep->bytes_per_address);
2769 fprintf_unfiltered (file, "elf = %s\n", tdep->is_elf ? "yes" : "no");
2770 }
2771
2772 void
2773 _initialize_hppa_tdep (void)
2774 {
2775 struct cmd_list_element *c;
2776 void break_at_finish_command (char *arg, int from_tty);
2777 void tbreak_at_finish_command (char *arg, int from_tty);
2778 void break_at_finish_at_depth_command (char *arg, int from_tty);
2779
2780 gdbarch_register (bfd_arch_hppa, hppa_gdbarch_init, hppa_dump_tdep);
2781
2782 hppa_objfile_priv_data = register_objfile_data ();
2783
2784 add_cmd ("unwind", class_maintenance, unwind_command,
2785 "Print unwind table entry at given address.",
2786 &maintenanceprintlist);
2787
2788 deprecate_cmd (add_com ("xbreak", class_breakpoint,
2789 break_at_finish_command,
2790 concat ("Set breakpoint at procedure exit. \n\
2791 Argument may be function name, or \"*\" and an address.\n\
2792 If function is specified, break at end of code for that function.\n\
2793 If an address is specified, break at the end of the function that contains \n\
2794 that exact address.\n",
2795 "With no arg, uses current execution address of selected stack frame.\n\
2796 This is useful for breaking on return to a stack frame.\n\
2797 \n\
2798 Multiple breakpoints at one place are permitted, and useful if conditional.\n\
2799 \n\
2800 Do \"help breakpoints\" for info on other commands dealing with breakpoints.", NULL)), NULL);
2801 deprecate_cmd (add_com_alias ("xb", "xbreak", class_breakpoint, 1), NULL);
2802 deprecate_cmd (add_com_alias ("xbr", "xbreak", class_breakpoint, 1), NULL);
2803 deprecate_cmd (add_com_alias ("xbre", "xbreak", class_breakpoint, 1), NULL);
2804 deprecate_cmd (add_com_alias ("xbrea", "xbreak", class_breakpoint, 1), NULL);
2805
2806 deprecate_cmd (c = add_com ("txbreak", class_breakpoint,
2807 tbreak_at_finish_command,
2808 "Set temporary breakpoint at procedure exit. Either there should\n\
2809 be no argument or the argument must be a depth.\n"), NULL);
2810 set_cmd_completer (c, location_completer);
2811
2812 if (xdb_commands)
2813 deprecate_cmd (add_com ("bx", class_breakpoint,
2814 break_at_finish_at_depth_command,
2815 "Set breakpoint at procedure exit. Either there should\n\
2816 be no argument or the argument must be a depth.\n"), NULL);
2817
2818 /* Debug this file's internals. */
2819 add_show_from_set (add_set_cmd ("hppa", class_maintenance, var_zinteger,
2820 &hppa_debug, "Set hppa debugging.\n\
2821 When non-zero, hppa specific debugging is enabled.", &setdebuglist), &showdebuglist);
2822 }
2823