1 /* Copyright (C) 2010-2015 Free Software Foundation, Inc.
3 This file is part of GDB.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 #include "ia64-tdep.h"
20 #include "ia64-hpux-tdep.h"
21 #include "solib-ia64-hpux.h"
29 #include "opcode/ia64.h"
34 /* Need to define the following macro in order to get the complete
35 load_module_desc struct definition in dlfcn.h Otherwise, it doesn't
36 match the size of the struct the loader is providing us during load
38 #define _LOAD_MODULE_DESC_EXT
40 #include <sys/ttrace.h>
43 #include <service_mgr.h>
45 /* The following is to have access to the definition of type load_info_t. */
/* The r32 pseudo-register number.

   Like all stacked registers, r32 is treated as a pseudo-register,
   because it is not always available for read/write via the ttrace
   interface.  */
/* This is a bit of a hack, as we duplicate something hidden inside
   ia64-tdep.c, but oh well...  */
#define IA64_R32_PSEUDO_REGNUM (IA64_NAT127_REGNUM + 2)
57 /* Our struct so_list private data structure. */
61 /* The shared library module descriptor. We extract this structure
62 from the loader at the time the shared library gets mapped. */
63 struct load_module_desc module_desc
;
65 /* The text segment address as defined in the shared library object
66 (this is not the address where this segment got loaded). This
67 field is initially set to zero, and computed lazily. */
70 /* The data segment address as defined in the shared library object
71 (this is not the address where this segment got loaded). This
72 field is initially set to zero, and computed lazily. */
76 /* The list of shared libraries currently mapped by the inferior. */
78 static struct so_list
*so_list_head
= NULL
;
80 /* Create a new so_list element. The result should be deallocated
81 when no longer in use. */
83 static struct so_list
*
84 new_so_list (char *so_name
, struct load_module_desc module_desc
)
86 struct so_list
*new_so
;
88 new_so
= (struct so_list
*) XCNEW (struct so_list
);
89 new_so
->lm_info
= (struct lm_info
*) XCNEW (struct lm_info
);
90 new_so
->lm_info
->module_desc
= module_desc
;
92 strncpy (new_so
->so_name
, so_name
, SO_NAME_MAX_PATH_SIZE
- 1);
93 new_so
->so_name
[SO_NAME_MAX_PATH_SIZE
- 1] = '\0';
94 strcpy (new_so
->so_original_name
, new_so
->so_name
);
99 /* Return non-zero if the instruction at the current PC is a breakpoint
100 part of the dynamic loading process.
102 We identify such instructions by checking that the instruction at
103 the current pc is a break insn where no software breakpoint has been
104 inserted by us. We also verify that the operands have specific
105 known values, to be extra certain.
107 PTID is the ptid of the thread that should be checked, but this
108 function also assumes that inferior_ptid is already equal to PTID.
109 Ideally, we would like to avoid the requirement on inferior_ptid,
110 but many routines still use the inferior_ptid global to access
111 the relevant thread's register and memory. We still have the ptid
112 as parameter to be able to pass it to the routines that do take a ptid
113 - that way we avoid increasing explicit uses of the inferior_ptid
117 ia64_hpux_at_dld_breakpoint_1_p (ptid_t ptid
)
119 struct regcache
*regcache
= get_thread_regcache (ptid
);
120 CORE_ADDR pc
= regcache_read_pc (regcache
);
121 struct address_space
*aspace
= get_regcache_aspace (regcache
);
122 ia64_insn t0
, t1
, slot
[3], template, insn
;
126 /* If this is a regular breakpoint, then it can not be a dld one. */
127 if (breakpoint_inserted_here_p (aspace
, pc
))
130 slotnum
= ((long) pc
) & 0xf;
132 internal_error (__FILE__
, __LINE__
,
133 "invalid slot (%d) for address %s", slotnum
,
134 paddress (get_regcache_arch (regcache
), pc
));
137 read_memory (pc
, bundle
, sizeof (bundle
));
139 /* bundles are always in little-endian byte order */
140 t0
= bfd_getl64 (bundle
);
141 t1
= bfd_getl64 (bundle
+ 8);
142 template = (t0
>> 1) & 0xf;
143 slot
[0] = (t0
>> 5) & 0x1ffffffffffLL
;
144 slot
[1] = ((t0
>> 46) & 0x3ffff) | ((t1
& 0x7fffff) << 18);
145 slot
[2] = (t1
>> 23) & 0x1ffffffffffLL
;
147 if (template == 2 && slotnum
== 1)
149 /* skip L slot in MLI template: */
153 insn
= slot
[slotnum
];
155 return (insn
== 0x1c0c9c0 /* break.i 0x070327 */
156 || insn
== 0x3c0c9c0); /* break.i 0x0f0327 */
159 /* Same as ia64_hpux_at_dld_breakpoint_1_p above, with the following
160 differences: It temporarily sets inferior_ptid to PTID, and also
161 contains any exception being raised. */
164 ia64_hpux_at_dld_breakpoint_p (ptid_t ptid
)
166 volatile struct gdb_exception e
;
167 ptid_t saved_ptid
= inferior_ptid
;
170 inferior_ptid
= ptid
;
171 TRY_CATCH (e
, RETURN_MASK_ALL
)
173 result
= ia64_hpux_at_dld_breakpoint_1_p (ptid
);
175 inferior_ptid
= saved_ptid
;
177 warning (_("error while checking for dld breakpoint: %s"), e
.message
);
182 /* Handler for library load event: Read the information provided by
183 the loader, and then use it to read the shared library symbols. */
186 ia64_hpux_handle_load_event (struct regcache
*regcache
)
188 CORE_ADDR module_desc_addr
;
189 ULONGEST module_desc_size
;
190 CORE_ADDR so_path_addr
;
191 char so_path
[PATH_MAX
];
192 struct load_module_desc module_desc
;
193 struct so_list
*new_so
;
195 /* Extract the data provided by the loader as follow:
196 - r33: Address of load_module_desc structure
197 - r34: size of struct load_module_desc
198 - r35: Address of string holding shared library path
200 regcache_cooked_read_unsigned (regcache
, IA64_R32_PSEUDO_REGNUM
+ 1,
202 regcache_cooked_read_unsigned (regcache
, IA64_R32_PSEUDO_REGNUM
+ 2,
204 regcache_cooked_read_unsigned (regcache
, IA64_R32_PSEUDO_REGNUM
+ 3,
207 if (module_desc_size
!= sizeof (struct load_module_desc
))
208 warning (_("load_module_desc size (%ld) != size returned by kernel (%s)"),
209 sizeof (struct load_module_desc
),
210 pulongest (module_desc_size
));
212 read_memory_string (so_path_addr
, so_path
, PATH_MAX
);
213 read_memory (module_desc_addr
, (gdb_byte
*) &module_desc
,
214 sizeof (module_desc
));
216 /* Create a new so_list element and insert it at the start of our
217 so_list_head (we insert at the start of the list only because
218 it is less work compared to inserting it elsewhere). */
219 new_so
= new_so_list (so_path
, module_desc
);
220 new_so
->next
= so_list_head
;
221 so_list_head
= new_so
;
224 /* Update the value of the PC to point to the begining of the next
225 instruction bundle. */
228 ia64_hpux_move_pc_to_next_bundle (struct regcache
*regcache
)
230 CORE_ADDR pc
= regcache_read_pc (regcache
);
234 ia64_write_pc (regcache
, pc
);
237 /* Handle loader events.
239 PTID is the ptid of the thread corresponding to the event being
240 handled. Similarly to ia64_hpux_at_dld_breakpoint_1_p, this
241 function assumes that inferior_ptid is set to PTID. */
244 ia64_hpux_handle_dld_breakpoint_1 (ptid_t ptid
)
246 struct regcache
*regcache
= get_thread_regcache (ptid
);
249 /* The type of event is provided by the loaded via r32. */
250 regcache_cooked_read_unsigned (regcache
, IA64_R32_PSEUDO_REGNUM
, &arg0
);
253 case BREAK_DE_SVC_LOADED
:
254 /* Currently, the only service loads are uld and dld,
255 so we shouldn't need to do anything. Just ignore. */
257 case BREAK_DE_LIB_LOADED
:
258 ia64_hpux_handle_load_event (regcache
);
259 solib_add (NULL
, 0, ¤t_target
, auto_solib_add
);
261 case BREAK_DE_LIB_UNLOADED
:
262 case BREAK_DE_LOAD_COMPLETE
:
264 /* Ignore for now. */
268 /* Now that we have handled the event, we can move the PC to
269 the next instruction bundle, past the break instruction. */
270 ia64_hpux_move_pc_to_next_bundle (regcache
);
273 /* Same as ia64_hpux_handle_dld_breakpoint_1 above, with the following
274 differences: This function temporarily sets inferior_ptid to PTID,
275 and also contains any exception. */
278 ia64_hpux_handle_dld_breakpoint (ptid_t ptid
)
280 volatile struct gdb_exception e
;
281 ptid_t saved_ptid
= inferior_ptid
;
283 inferior_ptid
= ptid
;
284 TRY_CATCH (e
, RETURN_MASK_ALL
)
286 ia64_hpux_handle_dld_breakpoint_1 (ptid
);
288 inferior_ptid
= saved_ptid
;
290 warning (_("error detected while handling dld breakpoint: %s"), e
.message
);
293 /* Find the address of the code and data segments in ABFD, and update
294 TEXT_START and DATA_START accordingly. */
297 ia64_hpux_find_start_vma (bfd
*abfd
, CORE_ADDR
*text_start
,
298 CORE_ADDR
*data_start
)
300 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
307 if (bfd_seek (abfd
, i_ehdrp
->e_phoff
, SEEK_SET
) == -1)
308 error (_("invalid program header offset in %s"), abfd
->filename
);
310 for (i
= 0; i
< i_ehdrp
->e_phnum
; i
++)
312 if (bfd_bread (&phdr
, sizeof (phdr
), abfd
) != sizeof (phdr
))
313 error (_("failed to read segment %d in %s"), i
, abfd
->filename
);
315 if (phdr
.p_flags
& PF_X
316 && (*text_start
== 0 || phdr
.p_vaddr
< *text_start
))
317 *text_start
= phdr
.p_vaddr
;
319 if (phdr
.p_flags
& PF_W
320 && (*data_start
== 0 || phdr
.p_vaddr
< *data_start
))
321 *data_start
= phdr
.p_vaddr
;
325 /* The "relocate_section_addresses" target_so_ops routine for ia64-hpux. */
328 ia64_hpux_relocate_section_addresses (struct so_list
*so
,
329 struct target_section
*sec
)
331 CORE_ADDR offset
= 0;
333 /* If we haven't computed the text & data segment addresses, do so now.
334 We do this here, because we now have direct access to the associated
335 bfd, whereas we would have had to open our own if we wanted to do it
336 while processing the library-load event. */
337 if (so
->lm_info
->text_start
== 0 && so
->lm_info
->data_start
== 0)
338 ia64_hpux_find_start_vma (sec
->the_bfd_section
->owner
,
339 &so
->lm_info
->text_start
,
340 &so
->lm_info
->data_start
);
342 /* Determine the relocation offset based on which segment
343 the section belongs to. */
344 if ((so
->lm_info
->text_start
< so
->lm_info
->data_start
345 && sec
->addr
< so
->lm_info
->data_start
)
346 || (so
->lm_info
->text_start
> so
->lm_info
->data_start
347 && sec
->addr
>= so
->lm_info
->text_start
))
348 offset
= so
->lm_info
->module_desc
.text_base
- so
->lm_info
->text_start
;
349 else if ((so
->lm_info
->text_start
< so
->lm_info
->data_start
350 && sec
->addr
>= so
->lm_info
->data_start
)
351 || (so
->lm_info
->text_start
> so
->lm_info
->data_start
352 && sec
->addr
< so
->lm_info
->text_start
))
353 offset
= so
->lm_info
->module_desc
.data_base
- so
->lm_info
->data_start
;
355 /* And now apply the relocation. */
357 sec
->endaddr
+= offset
;
359 /* Best effort to set addr_high/addr_low. This is used only by
360 'info sharedlibrary'. */
361 if (so
->addr_low
== 0 || sec
->addr
< so
->addr_low
)
362 so
->addr_low
= sec
->addr
;
364 if (so
->addr_high
== 0 || sec
->endaddr
> so
->addr_high
)
365 so
->addr_high
= sec
->endaddr
;
368 /* The "free_so" target_so_ops routine for ia64-hpux. */
371 ia64_hpux_free_so (struct so_list
*so
)
376 /* The "clear_solib" target_so_ops routine for ia64-hpux. */
379 ia64_hpux_clear_solib (void)
383 while (so_list_head
!= NULL
)
386 so_list_head
= so_list_head
->next
;
388 ia64_hpux_free_so (so
);
393 /* Assuming the inferior just stopped on an EXEC event, return
394 the address of the load_info_t structure. */
397 ia64_hpux_get_load_info_addr (void)
399 struct type
*data_ptr_type
= builtin_type (target_gdbarch ())->builtin_data_ptr
;
403 /* The address of the load_info_t structure is stored in the 4th
404 argument passed to the initial thread of the process (in other
405 words, in argv[3]). So get the address of these arguments,
406 and extract the 4th one. */
407 status
= ttrace (TT_PROC_GET_ARGS
, ptid_get_pid (inferior_ptid
),
408 0, (uintptr_t) &addr
, sizeof (CORE_ADDR
), 0);
409 if (status
== -1 && errno
)
410 perror_with_name (_("Unable to get argument list"));
411 return (read_memory_typed_address (addr
+ 3 * 8, data_ptr_type
));
414 /* A structure used to aggregate some information extracted from
415 the dynamic section of the main executable. */
423 /* Scan the ".dynamic" section referenced by ABFD and DYN_SECT,
424 and extract the information needed to fill in INFO. */
427 ia64_hpux_read_dynamic_info (struct gdbarch
*gdbarch
, bfd
*abfd
,
428 asection
*dyn_sect
, struct dld_info
*info
)
434 /* Make sure that info always has initialized data, even if we fail
435 to read the syn_sect section. */
436 memset (info
, 0, sizeof (struct dld_info
));
438 sect_size
= bfd_section_size (abfd
, dyn_sect
);
439 buf
= alloca (sect_size
);
440 buf_end
= buf
+ sect_size
;
442 if (bfd_seek (abfd
, dyn_sect
->filepos
, SEEK_SET
) != 0
443 || bfd_bread (buf
, sect_size
, abfd
) != sect_size
)
444 error (_("failed to read contents of .dynamic section"));
446 for (; buf
< buf_end
; buf
+= sizeof (Elf64_Dyn
))
448 Elf64_Dyn
*dynp
= (Elf64_Dyn
*) buf
;
451 d_tag
= bfd_h_get_64 (abfd
, &dynp
->d_tag
);
454 case DT_HP_DLD_FLAGS
:
455 info
->dld_flags
= bfd_h_get_64 (abfd
, &dynp
->d_un
);
460 CORE_ADDR load_map_addr
= bfd_h_get_64 (abfd
, &dynp
->d_un
.d_ptr
);
462 if (target_read_memory (load_map_addr
,
463 (gdb_byte
*) &info
->load_map
,
464 sizeof (info
->load_map
)) != 0)
465 error (_("failed to read load map at %s"),
466 paddress (gdbarch
, load_map_addr
));
473 /* Wrapper around target_read_memory used with libdl. */
476 ia64_hpux_read_tgt_mem (void *buffer
, uint64_t ptr
, size_t bufsiz
, int ident
)
478 if (target_read_memory (ptr
, (gdb_byte
*) buffer
, bufsiz
) != 0)
484 /* Create a new so_list object for a shared library, and store that
485 new so_list object in our SO_LIST_HEAD list.
487 SO_INDEX is an index specifying the placement of the loaded shared
488 library in the dynamic loader's search list. Normally, this index
489 is strictly positive, but an index of -1 refers to the loader itself.
491 Return nonzero if the so_list object could be created. A null
492 return value with a positive SO_INDEX normally means that there are
493 no more entries in the dynamic loader's search list at SO_INDEX or
497 ia64_hpux_add_so_from_dld_info (struct dld_info info
, int so_index
)
499 struct load_module_desc module_desc
;
504 so_handle
= dlgetmodinfo (so_index
, &module_desc
, sizeof (module_desc
),
505 ia64_hpux_read_tgt_mem
, 0, info
.load_map
);
508 /* No such entry. We probably reached the end of the list. */
511 so_path
= dlgetname (&module_desc
, sizeof (module_desc
),
512 ia64_hpux_read_tgt_mem
, 0, info
.load_map
);
515 /* Should never happen, but let's not crash if it does. */
516 warning (_("unable to get shared library name, symbols not loaded"));
520 /* Create a new so_list and insert it at the start of our list.
521 The order is not extremely important, but it's less work to do so
522 at the end of the list. */
523 so
= new_so_list (so_path
, module_desc
);
524 so
->next
= so_list_head
;
530 /* Assuming we just attached to a process, update our list of shared
531 libraries (SO_LIST_HEAD) as well as GDB's list. */
534 ia64_hpux_solib_add_after_attach (void)
538 struct dld_info info
;
541 if (symfile_objfile
== NULL
)
544 abfd
= symfile_objfile
->obfd
;
545 dyn_sect
= bfd_get_section_by_name (abfd
, ".dynamic");
547 if (dyn_sect
== NULL
|| bfd_section_size (abfd
, dyn_sect
) == 0)
550 ia64_hpux_read_dynamic_info (get_objfile_arch (symfile_objfile
), abfd
,
553 if ((info
.dld_flags
& DT_HP_DEBUG_PRIVATE
) == 0)
556 "The shared libraries were not privately mapped; setting a breakpoint\n\
557 in a shared library will not work until you rerun the program.\n\
558 Use the following command to enable debugging of shared libraries.\n\
559 chatr +dbg enable a.out"));
562 /* Read the symbols of the dynamic loader (dld.so). */
563 ia64_hpux_add_so_from_dld_info (info
, -1);
565 /* Read the symbols of all the other shared libraries. */
567 if (!ia64_hpux_add_so_from_dld_info (info
, i
))
568 break; /* End of list. */
570 /* Resync the library list at the core level. */
571 solib_add (NULL
, 1, ¤t_target
, auto_solib_add
);
574 /* The "create_inferior_hook" target_so_ops routine for ia64-hpux. */
577 ia64_hpux_solib_create_inferior_hook (int from_tty
)
579 CORE_ADDR load_info_addr
;
580 load_info_t load_info
;
582 /* Initially, we were thinking about adding a check that the program
583 (accessible through symfile_objfile) was linked against some shared
584 libraries, by searching for a ".dynamic" section. However, could
585 this break in the case of a statically linked program that later
586 uses dlopen? Programs that are fully statically linked are very
587 rare, and we will worry about them when we encounter one that
590 /* Set the LI_TRACE flag in the load_info_t structure. This enables
591 notifications when shared libraries are being mapped. */
592 load_info_addr
= ia64_hpux_get_load_info_addr ();
593 read_memory (load_info_addr
, (gdb_byte
*) &load_info
, sizeof (load_info
));
594 load_info
.li_flags
|= LI_TRACE
;
595 write_memory (load_info_addr
, (gdb_byte
*) &load_info
, sizeof (load_info
));
597 /* If we just attached to our process, some shard libraries have
598 already been mapped. Find which ones they are... */
599 if (current_inferior ()->attach_flag
)
600 ia64_hpux_solib_add_after_attach ();
/* The "special_symbol_handling" target_so_ops routine for ia64-hpux.
   Nothing to do on this platform.  */

static void
ia64_hpux_special_symbol_handling (void)
{
}
611 /* The "current_sos" target_so_ops routine for ia64-hpux. */
613 static struct so_list
*
614 ia64_hpux_current_sos (void)
616 /* Return a deep copy of our own list. */
617 struct so_list
*new_head
= NULL
, *prev_new_so
= NULL
;
618 struct so_list
*our_so
;
620 for (our_so
= so_list_head
; our_so
!= NULL
; our_so
= our_so
->next
)
622 struct so_list
*new_so
;
624 new_so
= new_so_list (our_so
->so_name
, our_so
->lm_info
->module_desc
);
625 if (prev_new_so
!= NULL
)
626 prev_new_so
->next
= new_so
;
627 prev_new_so
= new_so
;
628 if (new_head
== NULL
)
/* The "open_symbol_file_object" target_so_ops routine for ia64-hpux.
   Not implemented on this platform; always report failure.  */

static int
ia64_hpux_open_symbol_file_object (void *from_ttyp)
{
  return 0;
}
643 /* The "in_dynsym_resolve_code" target_so_ops routine for ia64-hpux. */
646 ia64_hpux_in_dynsym_resolve_code (CORE_ADDR pc
)
651 /* If FADDR is the address of a function inside one of the shared
652 libraries, return the shared library linkage address. */
655 ia64_hpux_get_solib_linkage_addr (CORE_ADDR faddr
)
657 struct so_list
*so
= so_list_head
;
661 struct load_module_desc module_desc
= so
->lm_info
->module_desc
;
663 if (module_desc
.text_base
<= faddr
664 && (module_desc
.text_base
+ module_desc
.text_size
) > faddr
)
665 return module_desc
.linkage_ptr
;
673 /* Create a new target_so_ops structure suitable for ia64-hpux, and
674 return its address. */
676 static struct target_so_ops
*
677 ia64_hpux_target_so_ops (void)
679 struct target_so_ops
*ops
= XCNEW (struct target_so_ops
);
681 ops
->relocate_section_addresses
= ia64_hpux_relocate_section_addresses
;
682 ops
->free_so
= ia64_hpux_free_so
;
683 ops
->clear_solib
= ia64_hpux_clear_solib
;
684 ops
->solib_create_inferior_hook
= ia64_hpux_solib_create_inferior_hook
;
685 ops
->special_symbol_handling
= ia64_hpux_special_symbol_handling
;
686 ops
->current_sos
= ia64_hpux_current_sos
;
687 ops
->open_symbol_file_object
= ia64_hpux_open_symbol_file_object
;
688 ops
->in_dynsym_resolve_code
= ia64_hpux_in_dynsym_resolve_code
;
689 ops
->bfd_open
= solib_bfd_open
;
694 /* Prevent warning from -Wmissing-prototypes. */
695 void _initialize_solib_ia64_hpux (void);
698 _initialize_solib_ia64_hpux (void)
700 ia64_hpux_so_ops
= ia64_hpux_target_so_ops ();