gdb/solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* On SVR4 systems, a list of symbols in the dynamic linker where
55 GDB can try to place a breakpoint to monitor shared library
56 events.
57
58 If none of these symbols are found, or other errors occur, then
59 SVR4 systems will fall back to using a symbol as the "startup
60 mapping complete" breakpoint address. */
61
62 static const char * const solib_break_names[] =
63 {
64 "r_debug_state",
65 "_r_debug_state",
66 "_dl_debug_state",
67 "rtld_db_dlactivity",
68 "__dl_rtld_db_dlactivity",
69 "_rtld_debug_state",
70
71 NULL
72 };
73
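/* Symbols in the main executable that can serve as the fallback
   "startup mapping complete" breakpoint described above.  */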
74 static const char * const bkpt_names[] =
75 {
76 "_start",
77 "__start",
78 "main",
79 NULL
80 };
81
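/* Names under which the main executable may appear in the link map,
   recognized by match_main below.  */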
82 static const char * const main_name_list[] =
83 {
84 "main_$main",
85 NULL
86 };
87
88 /* What to do when a probe stop occurs. */
89
90 enum probe_action
91 {
92 /* Something went seriously wrong. Stop using probes and
93 revert to using the older interface. */
94 PROBES_INTERFACE_FAILED,
95
96 /* No action is required. The shared object list is still
97 valid. */
98 DO_NOTHING,
99
100 /* The shared object list should be reloaded entirely. */
101 FULL_RELOAD,
102
103 /* Attempt to incrementally update the shared object list. If
104 the update fails or is not possible, fall back to reloading
105 the list in full. */
106 UPDATE_OR_RELOAD,
107 };
108
109 /* A probe's name and its associated action. */
110
111 struct probe_info
112 {
113 /* The name of the probe. */
114 const char *name;
115
116 /* What to do when a probe stop occurs. */
117 enum probe_action action;
118 };
119
120 /* A list of named probes and their associated actions. If all
121 probes are present in the dynamic linker then the probes-based
122 interface will be used. */
123
124 static const struct probe_info probe_info[] =
125 {
126 { "init_start", DO_NOTHING },
127 { "init_complete", FULL_RELOAD },
128 { "map_start", DO_NOTHING },
129 { "map_failed", DO_NOTHING },
130 { "reloc_complete", UPDATE_OR_RELOAD },
131 { "unmap_start", DO_NOTHING },
132 { "unmap_complete", FULL_RELOAD },
133 };
134
135 #define NUM_PROBES ARRAY_SIZE (probe_info)
136
137 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
138 the same shared library. */
139
140 static int
141 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
142 {
143 if (strcmp (gdb_so_name, inferior_so_name) == 0)
144 return 1;
145
146 /* On Solaris, when starting the inferior we think that the dynamic linker
147 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
148 contains /lib/ld.so.1. Sometimes one file is a link to another, but
149 sometimes they have identical content, but are not linked to each
150 other. We don't restrict this check for Solaris, but the chances
151 of running into this situation elsewhere are very low. */
152 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
153 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
154 return 1;
155
156 /* Similarly, we observed the same issue with sparc64, but with
157 different locations. */
158 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
159 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
160 return 1;
161
162 return 0;
163 }
164
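/* Return non-zero if GDB and INFERIOR describe the same shared
   library, comparing their original names with svr4_same_1.  */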
165 static int
166 svr4_same (struct so_list *gdb, struct so_list *inferior)
167 {
168 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
169 }
170
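/* Read the link_map entry at LM_ADDR from the inferior and return a
   newly allocated lm_info_svr4 describing it, or NULL if the entry
   could not be read.  */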
171 static lm_info_svr4 *
172 lm_info_read (CORE_ADDR lm_addr)
173 {
174 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
175 gdb_byte *lm;
176 lm_info_svr4 *lm_info;
177 struct cleanup *back_to;
178
179 lm = (gdb_byte *) xmalloc (lmo->link_map_size);
180 back_to = make_cleanup (xfree, lm);
181
182 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
183 {
184 warning (_("Error reading shared library list entry at %s"),
185 paddress (target_gdbarch (), lm_addr));
186 lm_info = NULL;
187 }
188 else
189 {
190 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
191
192 lm_info = new lm_info_svr4;
193 lm_info->lm_addr = lm_addr;
194
195 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
196 ptr_type);
197 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
198 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
199 ptr_type);
200 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
201 ptr_type);
202 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
203 ptr_type);
204 }
205
206 do_cleanups (back_to);
207
208 return lm_info;
209 }
210
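/* Return non-zero if the link map format includes the l_ld field,
   i.e. the address of the object's dynamic section.  */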
211 static int
212 has_lm_dynamic_from_link_map (void)
213 {
214 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
215
216 return lmo->l_ld_offset >= 0;
217 }
218
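/* Return the load bias (l_addr) of SO, computing and caching it on
   first use.  When ABFD is available, cross-check the link map's l_ld
   against the .dynamic section's VMA so that a prelink displacement
   can be detected and applied.  */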
219 static CORE_ADDR
220 lm_addr_check (const struct so_list *so, bfd *abfd)
221 {
222 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
223
224 if (!li->l_addr_p)
225 {
226 struct bfd_section *dyninfo_sect;
227 CORE_ADDR l_addr, l_dynaddr, dynaddr;
228
229 l_addr = li->l_addr_inferior;
230
231 if (! abfd || ! has_lm_dynamic_from_link_map ())
232 goto set_addr;
233
234 l_dynaddr = li->l_ld;
235
236 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
237 if (dyninfo_sect == NULL)
238 goto set_addr;
239
240 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
241
242 if (dynaddr + l_addr != l_dynaddr)
243 {
244 CORE_ADDR align = 0x1000;
245 CORE_ADDR minpagesize = align;
246
247 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
248 {
249 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
250 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
251 int i;
252
253 align = 1;
254
255 for (i = 0; i < ehdr->e_phnum; i++)
256 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
257 align = phdr[i].p_align;
258
259 minpagesize = get_elf_backend_data (abfd)->minpagesize;
260 }
261
262 /* Turn it into a mask. */
263 align--;
264
265 /* If the changes match the alignment requirements, we
266 assume we're using a core file that was generated by the
267 same binary, just prelinked with a different base offset.
268 If it doesn't match, we may have a different binary, the
269 same binary with the dynamic table loaded at an unrelated
270 location, or anything, really. To avoid regressions,
271 don't adjust the base offset in the latter case, although
272 odds are that, if things really changed, debugging won't
273 quite work.
274
275 One might instead expect the condition
276 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
277 but the one below is relaxed for PPC. The PPC kernel supports
278 either 4k or 64k page sizes. To be prepared for 64k pages,
279 PPC ELF files are built using an alignment requirement of 64k.
280 However, when running on a kernel supporting 4k pages, the memory
281 mapping of the library may not actually happen on a 64k boundary!
282
283 (In the usual case where (l_addr & align) == 0, this check is
284 equivalent to the possibly expected check above.)
285
286 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
287
288 l_addr = l_dynaddr - dynaddr;
289
290 if ((l_addr & (minpagesize - 1)) == 0
291 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
292 {
293 if (info_verbose)
294 printf_unfiltered (_("Using PIC (Position Independent Code) "
295 "prelink displacement %s for \"%s\".\n"),
296 paddress (target_gdbarch (), l_addr),
297 so->so_name);
298 }
299 else
300 {
301 /* There is no way to verify that the library file matches.  While
302 prelinking an unprelinked file (or unprelinking a prelinked one),
303 prelink can shift the DYNAMIC segment by an arbitrary offset
304 without any page size alignment.  There is also no way to locate
305 the ELF header and/or program headers for even a limited
306 verification that they match.  One could verify the DYNAMIC
307 segment itself.  Still, the address found here is the best one
308 GDB could find. */
309
310 warning (_(".dynamic section for \"%s\" "
311 "is not at the expected address "
312 "(wrong library or version mismatch?)"), so->so_name);
313 }
314 }
315
316 set_addr:
317 li->l_addr = l_addr;
318 li->l_addr_p = 1;
319 }
320
321 return li->l_addr;
322 }
323
324 /* Per pspace SVR4 specific data. */
325
326 struct svr4_info
327 {
328 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
329
330 /* Validity flag for debug_loader_offset. */
331 int debug_loader_offset_p;
332
333 /* Load address for the dynamic linker, inferred. */
334 CORE_ADDR debug_loader_offset;
335
336 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
337 char *debug_loader_name;
338
339 /* Load map address for the main executable. */
340 CORE_ADDR main_lm_addr;
341
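/* Address bounds of the program interpreter's text and PLT sections,
   used by svr4_in_dynsym_resolve_code to recognize dynamic linker
   resolver code.  */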
342 CORE_ADDR interp_text_sect_low;
343 CORE_ADDR interp_text_sect_high;
344 CORE_ADDR interp_plt_sect_low;
345 CORE_ADDR interp_plt_sect_high;
346
347 /* Nonzero if the list of objects was last obtained from the target
348 via qXfer:libraries-svr4:read. */
349 int using_xfer;
350
351 /* Table of struct probe_and_action instances, used by the
352 probes-based interface to map breakpoint addresses to probes
353 and their associated actions. Lookup is performed using
354 probe_and_action->probe->address. */
355 htab_t probes_table;
356
357 /* List of objects loaded into the inferior, used by the probes-
358 based interface. */
359 struct so_list *solib_list;
360 };
361
362 /* Per-program-space data key. */
363 static const struct program_space_data *solib_svr4_pspace_data;
364
365 /* Free the probes table. */
366
367 static void
368 free_probes_table (struct svr4_info *info)
369 {
370 if (info->probes_table == NULL)
371 return;
372
373 htab_delete (info->probes_table);
374 info->probes_table = NULL;
375 }
376
377 /* Free the solib list. */
378
379 static void
380 free_solib_list (struct svr4_info *info)
381 {
382 svr4_free_library_list (&info->solib_list);
383 info->solib_list = NULL;
384 }
385
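/* Cleanup routine for the per-program-space SVR4 data: free the
   probes table, the solib list, and the svr4_info itself.  */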
386 static void
387 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
388 {
389 struct svr4_info *info = (struct svr4_info *) arg;
390
391 free_probes_table (info);
392 free_solib_list (info);
393
394 xfree (info);
395 }
396
397 /* Get the current svr4 data. If none is found yet, add it now. This
398 function always returns a valid object. */
399
400 static struct svr4_info *
401 get_svr4_info (void)
402 {
403 struct svr4_info *info;
404
405 info = (struct svr4_info *) program_space_data (current_program_space,
406 solib_svr4_pspace_data);
407 if (info != NULL)
408 return info;
409
410 info = XCNEW (struct svr4_info);
411 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
412 return info;
413 }
414
415 /* Local function prototypes */
416
417 static int match_main (const char *);
418
419 /* Read program header TYPE from inferior memory. The header is found
420 by scanning the OS auxiliary vector.
421
422 If TYPE == -1, return the program headers instead of the contents of
423 one program header.
424
425 Return a pointer to allocated memory holding the program header contents,
426 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
427 size of those contents is returned in P_SECT_SIZE. Likewise, the target
428 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE and
429 the base address of the section is returned in BASE_ADDR. */
430
431 static gdb_byte *
432 read_program_header (int type, int *p_sect_size, int *p_arch_size,
433 CORE_ADDR *base_addr)
434 {
435 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
436 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
437 int arch_size, sect_size;
438 CORE_ADDR sect_addr;
439 gdb_byte *buf;
440 int pt_phdr_p = 0;
441
442 /* Get required auxv elements from target. */
443 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
444 return 0;
445 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
446 return 0;
447 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
448 return 0;
449 if (!at_phdr || !at_phnum)
450 return 0;
451
452 /* Determine ELF architecture type. */
453 if (at_phent == sizeof (Elf32_External_Phdr))
454 arch_size = 32;
455 else if (at_phent == sizeof (Elf64_External_Phdr))
456 arch_size = 64;
457 else
458 return 0;
459
460 /* Find the requested segment. */
461 if (type == -1)
462 {
463 sect_addr = at_phdr;
464 sect_size = at_phent * at_phnum;
465 }
466 else if (arch_size == 32)
467 {
468 Elf32_External_Phdr phdr;
469 int i;
470
471 /* Search for requested PHDR. */
472 for (i = 0; i < at_phnum; i++)
473 {
474 int p_type;
475
476 if (target_read_memory (at_phdr + i * sizeof (phdr),
477 (gdb_byte *)&phdr, sizeof (phdr)))
478 return 0;
479
480 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
481 4, byte_order);
482
483 if (p_type == PT_PHDR)
484 {
485 pt_phdr_p = 1;
486 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
487 4, byte_order);
488 }
489
490 if (p_type == type)
491 break;
492 }
493
494 if (i == at_phnum)
495 return 0;
496
497 /* Retrieve address and size. */
498 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
499 4, byte_order);
500 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
501 4, byte_order);
502 }
503 else
504 {
505 Elf64_External_Phdr phdr;
506 int i;
507
508 /* Search for requested PHDR. */
509 for (i = 0; i < at_phnum; i++)
510 {
511 int p_type;
512
513 if (target_read_memory (at_phdr + i * sizeof (phdr),
514 (gdb_byte *)&phdr, sizeof (phdr)))
515 return 0;
516
517 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
518 4, byte_order);
519
520 if (p_type == PT_PHDR)
521 {
522 pt_phdr_p = 1;
523 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
524 8, byte_order);
525 }
526
527 if (p_type == type)
528 break;
529 }
530
531 if (i == at_phnum)
532 return 0;
533
534 /* Retrieve address and size. */
535 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
536 8, byte_order);
537 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
538 8, byte_order);
539 }
540
541 /* PT_PHDR is optional, but we really need it
542 for PIE to make this work in general. */
543
544 if (pt_phdr_p)
545 {
546 /* AT_PHDR is the real address in memory; PT_PHDR is what the program
547 header says it should be. The relocation offset is the difference between the two. */
548 sect_addr = sect_addr + (at_phdr - pt_phdr);
549 }
550
551 /* Read in requested program header. */
552 buf = (gdb_byte *) xmalloc (sect_size);
553 if (target_read_memory (sect_addr, buf, sect_size))
554 {
555 xfree (buf);
556 return NULL;
557 }
558
559 if (p_arch_size)
560 *p_arch_size = arch_size;
561 if (p_sect_size)
562 *p_sect_size = sect_size;
563 if (base_addr)
564 *base_addr = sect_addr;
565
566 return buf;
567 }
568
569
570 /* Return program interpreter string. */
571 static char *
572 find_program_interpreter (void)
573 {
574 gdb_byte *buf = NULL;
575
576 /* If we have an exec_bfd, use its section table. */
577 if (exec_bfd
578 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
579 {
580 struct bfd_section *interp_sect;
581
582 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
583 if (interp_sect != NULL)
584 {
585 int sect_size = bfd_section_size (exec_bfd, interp_sect);
586
587 buf = (gdb_byte *) xmalloc (sect_size);
588 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
589 }
590 }
591
592 /* If we didn't find it, use the target auxiliary vector. */
593 if (!buf)
594 buf = read_program_header (PT_INTERP, NULL, NULL, NULL);
595
596 return (char *) buf;
597 }
598
599
600 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
601 found, 1 is returned and the corresponding PTR is set. */
602
603 static int
604 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr,
605 CORE_ADDR *ptr_addr)
606 {
607 int arch_size, step, sect_size;
608 long current_dyntag;
609 CORE_ADDR dyn_ptr, dyn_addr;
610 gdb_byte *bufend, *bufstart, *buf;
611 Elf32_External_Dyn *x_dynp_32;
612 Elf64_External_Dyn *x_dynp_64;
613 struct bfd_section *sect;
614 struct target_section *target_section;
615
616 if (abfd == NULL)
617 return 0;
618
619 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
620 return 0;
621
622 arch_size = bfd_get_arch_size (abfd);
623 if (arch_size == -1)
624 return 0;
625
626 /* Find the start address of the .dynamic section. */
627 sect = bfd_get_section_by_name (abfd, ".dynamic");
628 if (sect == NULL)
629 return 0;
630
631 for (target_section = current_target_sections->sections;
632 target_section < current_target_sections->sections_end;
633 target_section++)
634 if (sect == target_section->the_bfd_section)
635 break;
636 if (target_section < current_target_sections->sections_end)
637 dyn_addr = target_section->addr;
638 else
639 {
640 /* ABFD may come from OBJFILE acting only as a symbol file without being
641 loaded into the target (see add_symbol_file_command). In that case,
642 fall back to the file VMA address, without the possibility of having
643 the section relocated to its actual in-memory address. */
644
645 dyn_addr = bfd_section_vma (abfd, sect);
646 }
647
648 /* Read in .dynamic from the BFD. We will get the actual value
649 from memory later. */
650 sect_size = bfd_section_size (abfd, sect);
651 buf = bufstart = (gdb_byte *) alloca (sect_size);
652 if (!bfd_get_section_contents (abfd, sect,
653 buf, 0, sect_size))
654 return 0;
655
656 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
657 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
658 : sizeof (Elf64_External_Dyn);
659 for (bufend = buf + sect_size;
660 buf < bufend;
661 buf += step)
662 {
663 if (arch_size == 32)
664 {
665 x_dynp_32 = (Elf32_External_Dyn *) buf;
666 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
667 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
668 }
669 else
670 {
671 x_dynp_64 = (Elf64_External_Dyn *) buf;
672 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
673 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
674 }
675 if (current_dyntag == DT_NULL)
676 return 0;
677 if (current_dyntag == desired_dyntag)
678 {
679 /* If requested, try to read the runtime value of this .dynamic
680 entry. */
681 if (ptr)
682 {
683 struct type *ptr_type;
684 gdb_byte ptr_buf[8];
685 CORE_ADDR ptr_addr_1;
686
687 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
688 ptr_addr_1 = dyn_addr + (buf - bufstart) + arch_size / 8;
689 if (target_read_memory (ptr_addr_1, ptr_buf, arch_size / 8) == 0)
690 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
691 *ptr = dyn_ptr;
692 if (ptr_addr)
693 *ptr_addr = dyn_addr + (buf - bufstart);
694 }
695 return 1;
696 }
697 }
698
699 return 0;
700 }
701
702 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
703 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
704 is returned and the corresponding PTR is set. */
705
706 static int
707 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr,
708 CORE_ADDR *ptr_addr)
709 {
710 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
711 int sect_size, arch_size, step;
712 long current_dyntag;
713 CORE_ADDR dyn_ptr;
714 CORE_ADDR base_addr;
715 gdb_byte *bufend, *bufstart, *buf;
716
717 /* Read in .dynamic section. */
718 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size,
719 &base_addr);
720 if (!buf)
721 return 0;
722
723 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
724 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
725 : sizeof (Elf64_External_Dyn);
726 for (bufend = buf + sect_size;
727 buf < bufend;
728 buf += step)
729 {
730 if (arch_size == 32)
731 {
732 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
733
734 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735 4, byte_order);
736 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737 4, byte_order);
738 }
739 else
740 {
741 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
742
743 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
744 8, byte_order);
745 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
746 8, byte_order);
747 }
748 if (current_dyntag == DT_NULL)
749 break;
750
751 if (current_dyntag == desired_dyntag)
752 {
753 if (ptr)
754 *ptr = dyn_ptr;
755
756 if (ptr_addr)
757 *ptr_addr = base_addr + buf - bufstart;
758
759 xfree (bufstart);
760 return 1;
761 }
762 }
763
764 xfree (bufstart);
765 return 0;
766 }
767
768 /* Locate the base address of dynamic linker structs for SVR4 elf
769 targets.
770
771 For SVR4 elf targets the address of the dynamic linker's runtime
772 structure is contained within the dynamic info section in the
773 executable file. The dynamic section is also mapped into the
774 inferior address space. Because the runtime loader fills in the
775 real address before starting the inferior, we have to read in the
776 dynamic info section from the inferior address space.
777 If there are any errors while trying to find the address, we
778 silently return 0, otherwise the found address is returned. */
779
780 static CORE_ADDR
781 elf_locate_base (void)
782 {
783 struct bound_minimal_symbol msymbol;
784 CORE_ADDR dyn_ptr, dyn_ptr_addr;
785
786 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
787 instead of DT_DEBUG, although they sometimes contain an unused
788 DT_DEBUG. */
789 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr, NULL)
790 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr, NULL))
791 {
792 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
793 gdb_byte *pbuf;
794 int pbuf_size = TYPE_LENGTH (ptr_type);
795
796 pbuf = (gdb_byte *) alloca (pbuf_size);
797 /* DT_MIPS_RLD_MAP contains a pointer to the address
798 of the dynamic link structure. */
799 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
800 return 0;
801 return extract_typed_address (pbuf, ptr_type);
802 }
803
804 /* Then check DT_MIPS_RLD_MAP_REL. MIPS executables now use this form
805 because of needing to support PIE. DT_MIPS_RLD_MAP will also exist
806 in non-PIE. */
807 if (scan_dyntag (DT_MIPS_RLD_MAP_REL, exec_bfd, &dyn_ptr, &dyn_ptr_addr)
808 || scan_dyntag_auxv (DT_MIPS_RLD_MAP_REL, &dyn_ptr, &dyn_ptr_addr))
809 {
810 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
811 gdb_byte *pbuf;
812 int pbuf_size = TYPE_LENGTH (ptr_type);
813
814 pbuf = (gdb_byte *) alloca (pbuf_size);
815 /* DT_MIPS_RLD_MAP_REL contains an offset from the address of the
816 DT slot to the address of the dynamic link structure. */
817 if (target_read_memory (dyn_ptr + dyn_ptr_addr, pbuf, pbuf_size))
818 return 0;
819 return extract_typed_address (pbuf, ptr_type);
820 }
821
822 /* Find DT_DEBUG. */
823 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr, NULL)
824 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL))
825 return dyn_ptr;
826
827 /* This may be a static executable. Look for the symbol
828 conventionally named _r_debug, as a last resort. */
829 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
830 if (msymbol.minsym != NULL)
831 return BMSYMBOL_VALUE_ADDRESS (msymbol);
832
833 /* DT_DEBUG entry not found. */
834 return 0;
835 }
836
837 /* Locate the base address of dynamic linker structs.
838
839 For both the SunOS and SVR4 shared library implementations, if the
840 inferior executable has been linked dynamically, there is a single
841 address somewhere in the inferior's data space which is the key to
842 locating all of the dynamic linker's runtime structures. This
843 address is the value of the debug base symbol. The job of this
844 function is to find and return that address, or to return 0 if there
845 is no such address (the executable is statically linked for example).
846
847 For SunOS, the job is almost trivial, since the dynamic linker and
848 all of its structures are statically linked to the executable at
849 link time. Thus the symbol for the address we are looking for has
850 already been added to the minimal symbol table for the executable's
851 objfile at the time the symbol file's symbols were read, and all we
852 have to do is look it up there. Note that we explicitly do NOT want
853 to find the copies in the shared library.
854
855 The SVR4 version is a bit more complicated because the address
856 is contained somewhere in the dynamic info section. We have to go
857 to a lot more work to discover the address of the debug base symbol.
858 Because of this complexity, we cache the value we find and return that
859 value on subsequent invocations. Note there is no copy in the
860 executable symbol tables. */
861
862 static CORE_ADDR
863 locate_base (struct svr4_info *info)
864 {
865 /* Check to see if we have a currently valid address, and if so, avoid
866 doing all this work again and just return the cached address. If
867 we have no cached address, try to locate it in the dynamic info
868 section for ELF executables. There's no point in doing any of this
869 though if we don't have some link map offsets to work with. */
870
871 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
872 info->debug_base = elf_locate_base ();
873 return info->debug_base;
874 }
875
876 /* Find the first element in the inferior's dynamic link map, and
877 return its address in the inferior. Return zero if the address
878 could not be determined.
879
880 FIXME: Perhaps we should validate the info somehow, perhaps by
881 checking r_version for a known version number, or r_state for
882 RT_CONSISTENT. */
883
884 static CORE_ADDR
885 solib_svr4_r_map (struct svr4_info *info)
886 {
887 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
888 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
889 CORE_ADDR addr = 0;
890
891 TRY
892 {
893 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
894 ptr_type);
895 }
896 CATCH (ex, RETURN_MASK_ERROR)
897 {
898 exception_print (gdb_stderr, ex);
899 }
900 END_CATCH
901
902 return addr;
903 }
904
905 /* Find r_brk from the inferior's debug base. */
906
907 static CORE_ADDR
908 solib_svr4_r_brk (struct svr4_info *info)
909 {
910 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
911 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
912
913 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
914 ptr_type);
915 }
916
917 /* Find the link map for the dynamic linker (if it is not in the
918 normal list of loaded shared objects). */
919
920 static CORE_ADDR
921 solib_svr4_r_ldsomap (struct svr4_info *info)
922 {
923 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
924 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
925 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
926 ULONGEST version = 0;
927
928 TRY
929 {
930 /* Check version, and return zero if `struct r_debug' doesn't have
931 the r_ldsomap member. */
932 version
933 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
934 lmo->r_version_size, byte_order);
935 }
936 CATCH (ex, RETURN_MASK_ERROR)
937 {
938 exception_print (gdb_stderr, ex);
939 }
940 END_CATCH
941
942 if (version < 2 || lmo->r_ldsomap_offset == -1)
943 return 0;
944
945 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
946 ptr_type);
947 }
948
949 /* On Solaris systems with some versions of the dynamic linker,
950 ld.so's l_name pointer points to the SONAME in the string table
951 rather than into writable memory. So that GDB can find shared
952 libraries when loading a core file generated by gcore, ensure that
953 memory areas containing the l_name string are saved in the core
954 file. */
955
956 static int
957 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
958 {
959 struct svr4_info *info;
960 CORE_ADDR ldsomap;
961 struct so_list *newobj;
962 struct cleanup *old_chain;
963 CORE_ADDR name_lm;
964
965 info = get_svr4_info ();
966
967 info->debug_base = 0;
968 locate_base (info);
969 if (!info->debug_base)
970 return 0;
971
972 ldsomap = solib_svr4_r_ldsomap (info);
973 if (!ldsomap)
974 return 0;
975
976 newobj = XCNEW (struct so_list);
977 old_chain = make_cleanup (xfree, newobj);
978 lm_info_svr4 *li = lm_info_read (ldsomap);
979 newobj->lm_info = li;
980 make_cleanup (xfree, newobj->lm_info);
981 name_lm = li != NULL ? li->l_name : 0;
982 do_cleanups (old_chain);
983
984 return (name_lm >= vaddr && name_lm < vaddr + size);
985 }
986
987 /* Implement the "open_symbol_file_object" target_so_ops method.
988
989 If no open symbol file, attempt to locate and open the main symbol
990 file. On SVR4 systems, this is the first link map entry. If its
991 name is here, we can open it. Useful when attaching to a process
992 without first loading its symbol file. */
993
994 static int
995 open_symbol_file_object (void *from_ttyp)
996 {
997 CORE_ADDR lm, l_name;
998 char *filename;
999 int errcode;
1000 int from_tty = *(int *)from_ttyp;
1001 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1002 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
1003 int l_name_size = TYPE_LENGTH (ptr_type);
1004 gdb_byte *l_name_buf = (gdb_byte *) xmalloc (l_name_size);
1005 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1006 struct svr4_info *info = get_svr4_info ();
1007 symfile_add_flags add_flags = 0;
1008
1009 if (from_tty)
1010 add_flags |= SYMFILE_VERBOSE;
1011
1012 if (symfile_objfile)
1013 if (!query (_("Attempt to reload symbols from process? ")))
1014 {
1015 do_cleanups (cleanups);
1016 return 0;
1017 }
1018
1019 /* Always locate the debug struct, in case it has moved. */
1020 info->debug_base = 0;
1021 if (locate_base (info) == 0)
1022 {
1023 do_cleanups (cleanups);
1024 return 0; /* failed somehow... */
1025 }
1026
1027 /* First link map member should be the executable. */
1028 lm = solib_svr4_r_map (info);
1029 if (lm == 0)
1030 {
1031 do_cleanups (cleanups);
1032 return 0; /* failed somehow... */
1033 }
1034
1035 /* Read address of name from target memory to GDB. */
1036 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1037
1038 /* Convert the address to host format. */
1039 l_name = extract_typed_address (l_name_buf, ptr_type);
1040
1041 if (l_name == 0)
1042 {
1043 do_cleanups (cleanups);
1044 return 0; /* No filename. */
1045 }
1046
1047 /* Now fetch the filename from target memory. */
1048 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1049 make_cleanup (xfree, filename);
1050
1051 if (errcode)
1052 {
1053 warning (_("failed to read exec filename from attached file: %s"),
1054 safe_strerror (errcode));
1055 do_cleanups (cleanups);
1056 return 0;
1057 }
1058
1059 /* Have a pathname: read the symbol file. */
1060 symbol_file_add_main (filename, add_flags);
1061
1062 do_cleanups (cleanups);
1063 return 1;
1064 }
1065
1066 /* Data exchange structure for the XML parser as returned by
1067 svr4_current_sos_via_xfer_libraries. */
1068
1069 struct svr4_library_list
1070 {
1071 struct so_list *head, **tailp;
1072
1073 /* Inferior address of struct link_map used for the main executable. It is
1074 NULL if not known. */
1075 CORE_ADDR main_lm;
1076 };
1077
1078 /* Implementation for target_so_ops.free_so. */
1079
1080 static void
1081 svr4_free_so (struct so_list *so)
1082 {
1083 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1084
1085 delete li;
1086 }
1087
1088 /* Implement target_so_ops.clear_so. */
1089
1090 static void
1091 svr4_clear_so (struct so_list *so)
1092 {
1093 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1094
1095 if (li != NULL)
1096 li->l_addr_p = 0;
1097 }
1098
1099 /* Free so_list built so far (called via cleanup). */
1100
1101 static void
1102 svr4_free_library_list (void *p_list)
1103 {
1104 struct so_list *list = *(struct so_list **) p_list;
1105
1106 while (list != NULL)
1107 {
1108 struct so_list *next = list->next;
1109
1110 free_so (list);
1111 list = next;
1112 }
1113 }
1114
1115 /* Copy library list. */
1116
1117 static struct so_list *
1118 svr4_copy_library_list (struct so_list *src)
1119 {
1120 struct so_list *dst = NULL;
1121 struct so_list **link = &dst;
1122
1123 while (src != NULL)
1124 {
1125 struct so_list *newobj;
1126
1127 newobj = XNEW (struct so_list);
1128 memcpy (newobj, src, sizeof (struct so_list));
1129
1130 lm_info_svr4 *src_li = (lm_info_svr4 *) src->lm_info;
1131 newobj->lm_info = new lm_info_svr4 (*src_li);
1132
1133 newobj->next = NULL;
1134 *link = newobj;
1135 link = &newobj->next;
1136
1137 src = src->next;
1138 }
1139
1140 return dst;
1141 }
1142
1143 #ifdef HAVE_LIBEXPAT
1144
1145 #include "xml-support.h"
1146
1147 /* Handle the start of a <library> element. Note: new elements are added
1148 at the tail of the list, keeping the list in order. */
1149
1150 static void
1151 library_list_start_library (struct gdb_xml_parser *parser,
1152 const struct gdb_xml_element *element,
1153 void *user_data, VEC(gdb_xml_value_s) *attributes)
1154 {
1155 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1156 const char *name
1157 = (const char *) xml_find_attribute (attributes, "name")->value;
1158 ULONGEST *lmp
1159 = (ULONGEST *) xml_find_attribute (attributes, "lm")->value;
1160 ULONGEST *l_addrp
1161 = (ULONGEST *) xml_find_attribute (attributes, "l_addr")->value;
1162 ULONGEST *l_ldp
1163 = (ULONGEST *) xml_find_attribute (attributes, "l_ld")->value;
1164 struct so_list *new_elem;
1165
1166 new_elem = XCNEW (struct so_list);
1167 lm_info_svr4 *li = new lm_info_svr4;
1168 new_elem->lm_info = li;
1169 li->lm_addr = *lmp;
1170 li->l_addr_inferior = *l_addrp;
1171 li->l_ld = *l_ldp;
1172
1173 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1174 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1175 strcpy (new_elem->so_original_name, new_elem->so_name);
1176
1177 *list->tailp = new_elem;
1178 list->tailp = &new_elem->next;
1179 }
1180
1181 /* Handle the start of a <library-list-svr4> element. */
1182
1183 static void
1184 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1185 const struct gdb_xml_element *element,
1186 void *user_data, VEC(gdb_xml_value_s) *attributes)
1187 {
1188 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1189 const char *version
1190 = (const char *) xml_find_attribute (attributes, "version")->value;
1191 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1192
1193 if (strcmp (version, "1.0") != 0)
1194 gdb_xml_error (parser,
1195 _("SVR4 Library list has unsupported version \"%s\""),
1196 version);
1197
1198 if (main_lm)
1199 list->main_lm = *(ULONGEST *) main_lm->value;
1200 }
1201
1202 /* The allowed elements and attributes for an XML library list.
1203 The root element is a <library-list-svr4>. */
1204
1205 static const struct gdb_xml_attribute svr4_library_attributes[] =
1206 {
1207 { "name", GDB_XML_AF_NONE, NULL, NULL },
1208 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1209 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1210 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1211 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1212 };
1213
1214 static const struct gdb_xml_element svr4_library_list_children[] =
1215 {
1216 {
1217 "library", svr4_library_attributes, NULL,
1218 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1219 library_list_start_library, NULL
1220 },
1221 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1222 };
1223
1224 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1225 {
1226 { "version", GDB_XML_AF_NONE, NULL, NULL },
1227 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1228 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1229 };
1230
1231 static const struct gdb_xml_element svr4_library_list_elements[] =
1232 {
1233 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1234 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1235 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1236 };
1237
1238 /* Parse DOCUMENT, a qXfer:libraries-svr4:read reply, into *LIST.
1239 
1240 Return 0 if the packet is not supported; *LIST is not modified in that
1241 case. Return 1 if *LIST contains the library list; it may be empty,
1242 and the caller is responsible for freeing all its entries. */
1243
1244 static int
1245 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1246 {
1247 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1248 &list->head);
1249
1250 memset (list, 0, sizeof (*list));
1251 list->tailp = &list->head;
1252 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1253 svr4_library_list_elements, document, list) == 0)
1254 {
1255 /* Parsed successfully, keep the result. */
1256 discard_cleanups (back_to);
1257 return 1;
1258 }
1259
1260 do_cleanups (back_to);
1261 return 0;
1262 }
1263
1264 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1265
1266 Return 0 if the packet is not supported; *LIST is not modified in that
1267 case. Return 1 if *LIST contains the library list; it may be empty,
1268 and the caller is responsible for freeing all its entries.
1269
1270 Note that ANNEX must be NULL if the remote does not explicitly allow
1271 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1272 this can be checked using target_augmented_libraries_svr4_read (). */
1273
1274 static int
1275 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1276 const char *annex)
1277 {
1278 char *svr4_library_document;
1279 int result;
1280 struct cleanup *back_to;
1281
1282 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1283
1284 /* Fetch the list of shared libraries. */
1285 svr4_library_document = target_read_stralloc (&current_target,
1286 TARGET_OBJECT_LIBRARIES_SVR4,
1287 annex);
1288 if (svr4_library_document == NULL)
1289 return 0;
1290
1291 back_to = make_cleanup (xfree, svr4_library_document);
1292 result = svr4_parse_libraries (svr4_library_document, list);
1293 do_cleanups (back_to);
1294
1295 return result;
1296 }
1297
1298 #else
1299
1300 static int
1301 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1302 const char *annex)
1303 {
1304 return 0;
1305 }
1306
1307 #endif
1308
1309 /* If no shared library information is available from the dynamic
1310 linker, build a fallback list from other sources. */
1311
1312 static struct so_list *
1313 svr4_default_sos (void)
1314 {
1315 struct svr4_info *info = get_svr4_info ();
1316 struct so_list *newobj;
1317
1318 if (!info->debug_loader_offset_p)
1319 return NULL;
1320
1321 newobj = XCNEW (struct so_list);
1322 lm_info_svr4 *li = new lm_info_svr4;
1323 newobj->lm_info = li;
1324
1325 /* Nothing will ever check the other fields if we set l_addr_p. */
1326 li->l_addr = info->debug_loader_offset;
1327 li->l_addr_p = 1;
1328
1329 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1330 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1331 strcpy (newobj->so_original_name, newobj->so_name);
1332
1333 return newobj;
1334 }
1335
1336 /* Read the whole inferior libraries chain starting at address LM.
1337 Expect the first entry in the chain's previous entry to be PREV_LM.
1338 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1339 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1340 to it. Returns nonzero upon success. If zero is returned the
1341 entries stored to LINK_PTR_PTR are still valid although they may
1342 represent only part of the inferior library list. */
1343
1344 static int
1345 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1346 struct so_list ***link_ptr_ptr, int ignore_first)
1347 {
1348 CORE_ADDR first_l_name = 0;
1349 CORE_ADDR next_lm;
1350
1351 for (; lm != 0; prev_lm = lm, lm = next_lm)
1352 {
1353 int errcode;
1354 char *buffer;
1355
1356 so_list_up newobj (XCNEW (struct so_list));
1357
1358 lm_info_svr4 *li = lm_info_read (lm);
1359 newobj->lm_info = li;
1360 if (li == NULL)
1361 return 0;
1362
1363 next_lm = li->l_next;
1364
1365 if (li->l_prev != prev_lm)
1366 {
1367 warning (_("Corrupted shared library list: %s != %s"),
1368 paddress (target_gdbarch (), prev_lm),
1369 paddress (target_gdbarch (), li->l_prev));
1370 return 0;
1371 }
1372
1373 /* For SVR4 versions, the first entry in the link map is for the
1374 inferior executable, so we must ignore it. For some versions of
1375 SVR4, it has no name. For others (Solaris 2.3 for example), it
1376 does have a name, so we can no longer use a missing name to
1377 decide when to ignore it. */
1378 if (ignore_first && li->l_prev == 0)
1379 {
1380 struct svr4_info *info = get_svr4_info ();
1381
1382 first_l_name = li->l_name;
1383 info->main_lm_addr = li->lm_addr;
1384 continue;
1385 }
1386
1387 /* Extract this shared object's name. */
1388 target_read_string (li->l_name, &buffer, SO_NAME_MAX_PATH_SIZE - 1,
1389 &errcode);
1390 if (errcode != 0)
1391 {
1392 /* If this entry's l_name address matches that of the
1393 inferior executable, then this is not a normal shared
1394 object, but (most likely) a vDSO. In this case, silently
1395 skip it; otherwise emit a warning. */
1396 if (first_l_name == 0 || li->l_name != first_l_name)
1397 warning (_("Can't read pathname for load map: %s."),
1398 safe_strerror (errcode));
1399 continue;
1400 }
1401
1402 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1403 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1404 strcpy (newobj->so_original_name, newobj->so_name);
1405 xfree (buffer);
1406
1407 /* If this entry has no name, or its name matches the name
1408 for the main executable, don't include it in the list. */
1409 if (! newobj->so_name[0] || match_main (newobj->so_name))
1410 continue;
1411
1412 newobj->next = 0;
1413 /* Don't free it now. */
1414 **link_ptr_ptr = newobj.release ();
1415 *link_ptr_ptr = &(**link_ptr_ptr)->next;
1416 }
1417
1418 return 1;
1419 }
1420
1421 /* Read the full list of currently loaded shared objects directly
1422 from the inferior, without referring to any libraries read and
1423 stored by the probes interface. Handle special cases relating
1424 to the first elements of the list. */
1425
1426 static struct so_list *
1427 svr4_current_sos_direct (struct svr4_info *info)
1428 {
1429 CORE_ADDR lm;
1430 struct so_list *head = NULL;
1431 struct so_list **link_ptr = &head;
1432 struct cleanup *back_to;
1433 int ignore_first;
1434 struct svr4_library_list library_list;
1435
1436 /* Fall back to manual examination of the target if the packet is not
1437 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1438 tests a case where gdbserver cannot find the shared libraries list while
1439 GDB itself is able to find it via SYMFILE_OBJFILE.
1440
1441 Unfortunately statically linked inferiors will also fall back through this
1442 suboptimal code path. */
1443
1444 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1445 NULL);
1446 if (info->using_xfer)
1447 {
1448 if (library_list.main_lm)
1449 info->main_lm_addr = library_list.main_lm;
1450
1451 return library_list.head ? library_list.head : svr4_default_sos ();
1452 }
1453
1454 /* Always locate the debug struct, in case it has moved. */
1455 info->debug_base = 0;
1456 locate_base (info);
1457
1458 /* If we can't find the dynamic linker's base structure, this
1459 must not be a dynamically linked executable. Hmm. */
1460 if (! info->debug_base)
1461 return svr4_default_sos ();
1462
1463 /* Assume that everything is a library if the dynamic loader was loaded
1464 late by a static executable. */
1465 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1466 ignore_first = 0;
1467 else
1468 ignore_first = 1;
1469
1470 back_to = make_cleanup (svr4_free_library_list, &head);
1471
1472 /* Walk the inferior's link map list, and build our list of
1473 `struct so_list' nodes. */
1474 lm = solib_svr4_r_map (info);
1475 if (lm)
1476 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1477
1478 /* On Solaris, the dynamic linker is not in the normal list of
1479 shared objects, so make sure we pick it up too. Having
1480 symbol information for the dynamic linker is quite crucial
1481 for skipping dynamic linker resolver code. */
1482 lm = solib_svr4_r_ldsomap (info);
1483 if (lm)
1484 svr4_read_so_list (lm, 0, &link_ptr, 0);
1485
1486 discard_cleanups (back_to);
1487
1488 if (head == NULL)
1489 return svr4_default_sos ();
1490
1491 return head;
1492 }
1493
1494 /* Implement the main part of the "current_sos" target_so_ops
1495 method. */
1496
1497 static struct so_list *
1498 svr4_current_sos_1 (void)
1499 {
1500 struct svr4_info *info = get_svr4_info ();
1501
1502 /* If the solib list has been read and stored by the probes
1503 interface then we return a copy of the stored list. */
1504 if (info->solib_list != NULL)
1505 return svr4_copy_library_list (info->solib_list);
1506
1507 /* Otherwise obtain the solib list directly from the inferior. */
1508 return svr4_current_sos_direct (info);
1509 }
1510
1511 /* Implement the "current_sos" target_so_ops method. */
1512
1513 static struct so_list *
1514 svr4_current_sos (void)
1515 {
1516 struct so_list *so_head = svr4_current_sos_1 ();
1517 struct mem_range vsyscall_range;
1518
1519 /* Filter out the vDSO module, if present. Its symbol file would
1520 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1521 managed by symfile-mem.c:add_vsyscall_page. */
1522 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1523 && vsyscall_range.length != 0)
1524 {
1525 struct so_list **sop;
1526
1527 sop = &so_head;
1528 while (*sop != NULL)
1529 {
1530 struct so_list *so = *sop;
1531
1532 /* We can't simply match the vDSO by starting address alone,
1533 because lm_info->l_addr_inferior (and also l_addr) do not
1534 necessarily represent the real starting address of the
1535 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1536 field (the ".dynamic" section of the shared object)
1537 always points at the absolute/resolved address though.
1538 So check whether that address is inside the vDSO's
1539 mapping instead.
1540
1541 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1542 0-based ELF, and we see:
1543
1544 (gdb) info auxv
1545 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1546 (gdb) p/x *_r_debug.r_map.l_next
1547 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1548
1549 And on Linux 2.6.32 (x86_64) we see:
1550
1551 (gdb) info auxv
1552 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1553 (gdb) p/x *_r_debug.r_map.l_next
1554 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1555
1556 Dumping that vDSO shows:
1557
1558 (gdb) info proc mappings
1559 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1560 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1561 # readelf -Wa vdso.bin
1562 [...]
1563 Entry point address: 0xffffffffff700700
1564 [...]
1565 Section Headers:
1566 [Nr] Name Type Address Off Size
1567 [ 0] NULL 0000000000000000 000000 000000
1568 [ 1] .hash HASH ffffffffff700120 000120 000038
1569 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1570 [...]
1571 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1572 */
1573
1574 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1575
1576 if (address_in_mem_range (li->l_ld, &vsyscall_range))
1577 {
1578 *sop = so->next;
1579 free_so (so);
1580 break;
1581 }
1582
1583 sop = &so->next;
1584 }
1585 }
1586
1587 return so_head;
1588 }
1589
1590 /* Get the address of the link_map for a given OBJFILE. */
1591
1592 CORE_ADDR
1593 svr4_fetch_objfile_link_map (struct objfile *objfile)
1594 {
1595 struct so_list *so;
1596 struct svr4_info *info = get_svr4_info ();
1597
1598 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1599 if (info->main_lm_addr == 0)
1600 solib_add (NULL, 0, auto_solib_add);
1601
1602 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1603 if (objfile == symfile_objfile)
1604 return info->main_lm_addr;
1605
1606 /* The other link map addresses may be found by examining the list
1607 of shared libraries. */
1608 for (so = master_so_list (); so; so = so->next)
1609 if (so->objfile == objfile)
1610 {
1611 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1612
1613 return li->lm_addr;
1614 }
1615
1616 /* Not found! */
1617 return 0;
1618 }
1619
1620 /* On some systems, the only way to recognize the link map entry for
1621 the main executable file is by looking at its name. Return
1622 non-zero iff SONAME matches one of the known main executable names. */
1623
1624 static int
1625 match_main (const char *soname)
1626 {
1627 const char * const *mainp;
1628
1629 for (mainp = main_name_list; *mainp != NULL; mainp++)
1630 {
1631 if (strcmp (soname, *mainp) == 0)
1632 return (1);
1633 }
1634
1635 return (0);
1636 }
1637
1638 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1639 SVR4 run time loader. */
1640
1641 int
1642 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1643 {
1644 struct svr4_info *info = get_svr4_info ();
1645
1646 return ((pc >= info->interp_text_sect_low
1647 && pc < info->interp_text_sect_high)
1648 || (pc >= info->interp_plt_sect_low
1649 && pc < info->interp_plt_sect_high)
1650 || in_plt_section (pc)
1651 || in_gnu_ifunc_stub (pc));
1652 }
1653
1654 /* Given an executable's ABFD and target, compute the entry-point
1655 address. */
1656
1657 static CORE_ADDR
1658 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1659 {
1660 CORE_ADDR addr;
1661
1662 /* KevinB wrote ... for most targets, the address returned by
1663 bfd_get_start_address() is the entry point for the start
1664 function. But, for some targets, bfd_get_start_address() returns
1665 the address of a function descriptor from which the entry point
1666 address may be extracted. This address is extracted by
1667 gdbarch_convert_from_func_ptr_addr(). The method
1668 gdbarch_convert_from_func_ptr_addr() is merely the identity
1669 function for targets which don't use function descriptors. */
1670 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1671 bfd_get_start_address (abfd),
1672 targ);
1673 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1674 }
1675
1676 /* A probe and its associated action. */
1677
1678 struct probe_and_action
1679 {
1680 /* The probe. */
1681 struct probe *probe;
1682
1683 /* The relocated address of the probe. */
1684 CORE_ADDR address;
1685
1686 /* The action. */
1687 enum probe_action action;
1688 };
1689
1690 /* Returns a hash code for the probe_and_action referenced by p. */
1691
1692 static hashval_t
1693 hash_probe_and_action (const void *p)
1694 {
1695 const struct probe_and_action *pa = (const struct probe_and_action *) p;
1696
1697 return (hashval_t) pa->address;
1698 }
1699
1700 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1701 are equal. */
1702
1703 static int
1704 equal_probe_and_action (const void *p1, const void *p2)
1705 {
1706 const struct probe_and_action *pa1 = (const struct probe_and_action *) p1;
1707 const struct probe_and_action *pa2 = (const struct probe_and_action *) p2;
1708
1709 return pa1->address == pa2->address;
1710 }
1711
1712 /* Register a solib event probe and its associated action in the
1713 probes table. */
1714
1715 static void
1716 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1717 enum probe_action action)
1718 {
1719 struct svr4_info *info = get_svr4_info ();
1720 struct probe_and_action lookup, *pa;
1721 void **slot;
1722
1723 /* Create the probes table, if necessary. */
1724 if (info->probes_table == NULL)
1725 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1726 equal_probe_and_action,
1727 xfree, xcalloc, xfree);
1728
1729 lookup.probe = probe;
1730 lookup.address = address;
1731 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1732 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1733
1734 pa = XCNEW (struct probe_and_action);
1735 pa->probe = probe;
1736 pa->address = address;
1737 pa->action = action;
1738
1739 *slot = pa;
1740 }
1741
1742 /* Get the solib event probe at the specified location, and the
1743 action associated with it. Returns NULL if no solib event probe
1744 was found. */
1745
1746 static struct probe_and_action *
1747 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1748 {
1749 struct probe_and_action lookup;
1750 void **slot;
1751
1752 lookup.address = address;
1753 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1754
1755 if (slot == NULL)
1756 return NULL;
1757
1758 return (struct probe_and_action *) *slot;
1759 }
1760
1761 /* Decide what action to take when the specified solib event probe is
1762 hit. */
1763
1764 static enum probe_action
1765 solib_event_probe_action (struct probe_and_action *pa)
1766 {
1767 enum probe_action action;
1768 unsigned probe_argc = 0;
1769 struct frame_info *frame = get_current_frame ();
1770
1771 action = pa->action;
1772 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1773 return action;
1774
1775 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1776
1777 /* Check that an appropriate number of arguments has been supplied.
1778 We expect:
1779 arg0: Lmid_t lmid (mandatory)
1780 arg1: struct r_debug *debug_base (mandatory)
1781 arg2: struct link_map *new (optional, for incremental updates) */
1782 TRY
1783 {
1784 probe_argc = get_probe_argument_count (pa->probe, frame);
1785 }
1786 CATCH (ex, RETURN_MASK_ERROR)
1787 {
1788 exception_print (gdb_stderr, ex);
1789 probe_argc = 0;
1790 }
1791 END_CATCH
1792
1793 /* If get_probe_argument_count throws an exception, probe_argc will
1794 be set to zero. However, if pa->probe does not have arguments,
1795 then get_probe_argument_count will succeed but probe_argc will
1796 also be zero. Both cases happen because of different things, but
1797 they are treated equally here: action will be set to
1798 PROBES_INTERFACE_FAILED. */
1799 if (probe_argc == 2)
1800 action = FULL_RELOAD;
1801 else if (probe_argc < 2)
1802 action = PROBES_INTERFACE_FAILED;
1803
1804 return action;
1805 }
1806
1807 /* Populate the shared object list by reading the entire list of
1808 shared objects from the inferior. Handle special cases relating
1809 to the first elements of the list. Returns nonzero on success. */
1810
1811 static int
1812 solist_update_full (struct svr4_info *info)
1813 {
1814 free_solib_list (info);
1815 info->solib_list = svr4_current_sos_direct (info);
1816
1817 return 1;
1818 }
1819
1820 /* Update the shared object list starting from the link-map entry
1821 passed by the linker in the probe's third argument. Returns
1822 nonzero if the list was successfully updated, or zero to indicate
1823 failure. */
1824
1825 static int
1826 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1827 {
1828 struct so_list *tail;
1829 CORE_ADDR prev_lm;
1830
1831 /* svr4_current_sos_direct contains logic to handle a number of
1832 special cases relating to the first elements of the list. To
1833 avoid duplicating this logic we defer to solist_update_full
1834 if the list is empty. */
1835 if (info->solib_list == NULL)
1836 return 0;
1837
1838 /* Fall back to a full update if we are using a remote target
1839 that does not support incremental transfers. */
1840 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1841 return 0;
1842
1843 /* Walk to the end of the list. */
1844 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1845 /* Nothing. */;
1846
1847 lm_info_svr4 *li = (lm_info_svr4 *) tail->lm_info;
1848 prev_lm = li->lm_addr;
1849
1850 /* Read the new objects. */
1851 if (info->using_xfer)
1852 {
1853 struct svr4_library_list library_list;
1854 char annex[64];
1855
1856 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1857 phex_nz (lm, sizeof (lm)),
1858 phex_nz (prev_lm, sizeof (prev_lm)));
1859 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1860 return 0;
1861
1862 tail->next = library_list.head;
1863 }
1864 else
1865 {
1866 struct so_list **link = &tail->next;
1867
1868 /* IGNORE_FIRST may safely be set to zero here because the
1869 above check and deferral to solist_update_full ensure
1870 that this call to svr4_read_so_list will never see the
1871 first element. */
1872 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1873 return 0;
1874 }
1875
1876 return 1;
1877 }
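
/* Illustration of the incremental request built above, with hypothetical
   addresses: for LM == 0x7ffff7fbb190 and PREV_LM == 0x7ffff7ffe190 the
   annex becomes

     "start=7ffff7fbb190;prev=7ffff7ffe190"

   (phex_nz prints lower-case hex without a "0x" prefix), and on a remote
   target this is typically carried by a qXfer:libraries-svr4:read request
   so that only the link-map entries from LM onwards are transferred.  */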
1878
1879 /* Disable the probes-based linker interface and revert to the
1880 original interface. We don't reset the breakpoints as the
1881 ones set up for the probes-based interface are adequate. */
1882
1883 static void
1884 disable_probes_interface_cleanup (void *arg)
1885 {
1886 struct svr4_info *info = get_svr4_info ();
1887
1888 warning (_("Probes-based dynamic linker interface failed.\n"
1889 "Reverting to original interface.\n"));
1890
1891 free_probes_table (info);
1892 free_solib_list (info);
1893 }
1894
1895 /* Update the solib list as appropriate when using the
1896 probes-based linker interface. Do nothing if using the
1897 standard interface. */
1898
1899 static void
1900 svr4_handle_solib_event (void)
1901 {
1902 struct svr4_info *info = get_svr4_info ();
1903 struct probe_and_action *pa;
1904 enum probe_action action;
1905 struct cleanup *old_chain, *usm_chain;
1906 struct value *val = NULL;
1907 CORE_ADDR pc, debug_base, lm = 0;
1908 struct frame_info *frame = get_current_frame ();
1909
1910 /* Do nothing if not using the probes interface. */
1911 if (info->probes_table == NULL)
1912 return;
1913
1914 /* If anything goes wrong we revert to the original linker
1915 interface. */
1916 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1917
1918 pc = regcache_read_pc (get_current_regcache ());
1919 pa = solib_event_probe_at (info, pc);
1920 if (pa == NULL)
1921 {
1922 do_cleanups (old_chain);
1923 return;
1924 }
1925
1926 action = solib_event_probe_action (pa);
1927 if (action == PROBES_INTERFACE_FAILED)
1928 {
1929 do_cleanups (old_chain);
1930 return;
1931 }
1932
1933 if (action == DO_NOTHING)
1934 {
1935 discard_cleanups (old_chain);
1936 return;
1937 }
1938
1939 /* evaluate_probe_argument looks up symbols in the dynamic linker
1940 using find_pc_section. find_pc_section is accelerated by a cache
1941 called the section map. The section map is invalidated every
1942 time a shared library is loaded or unloaded, and if the inferior
1943 is generating a lot of shared library events then the section map
1944 will be updated every time svr4_handle_solib_event is called.
1945 We called find_pc_section in svr4_create_solib_event_breakpoints,
1946 so we can guarantee that the dynamic linker's sections are in the
1947 section map. We can therefore inhibit section map updates across
1948 these calls to evaluate_probe_argument and save a lot of time. */
1949 inhibit_section_map_updates (current_program_space);
1950 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1951 current_program_space);
1952
1953 TRY
1954 {
1955 val = evaluate_probe_argument (pa->probe, 1, frame);
1956 }
1957 CATCH (ex, RETURN_MASK_ERROR)
1958 {
1959 exception_print (gdb_stderr, ex);
1960 val = NULL;
1961 }
1962 END_CATCH
1963
1964 if (val == NULL)
1965 {
1966 do_cleanups (old_chain);
1967 return;
1968 }
1969
1970 debug_base = value_as_address (val);
1971 if (debug_base == 0)
1972 {
1973 do_cleanups (old_chain);
1974 return;
1975 }
1976
1977 /* Always locate the debug struct, in case it moved. */
1978 info->debug_base = 0;
1979 if (locate_base (info) == 0)
1980 {
1981 do_cleanups (old_chain);
1982 return;
1983 }
1984
1985 /* GDB does not currently support libraries loaded via dlmopen
1986 into namespaces other than the initial one. We must ignore
1987 any namespace other than the initial namespace here until
1988 support for this is added to GDB. */
1989 if (debug_base != info->debug_base)
1990 action = DO_NOTHING;
1991
1992 if (action == UPDATE_OR_RELOAD)
1993 {
1994 TRY
1995 {
1996 val = evaluate_probe_argument (pa->probe, 2, frame);
1997 }
1998 CATCH (ex, RETURN_MASK_ERROR)
1999 {
2000 exception_print (gdb_stderr, ex);
2001 do_cleanups (old_chain);
2002 return;
2003 }
2004 END_CATCH
2005
2006 if (val != NULL)
2007 lm = value_as_address (val);
2008
2009 if (lm == 0)
2010 action = FULL_RELOAD;
2011 }
2012
2013 /* Resume section map updates. */
2014 do_cleanups (usm_chain);
2015
2016 if (action == UPDATE_OR_RELOAD)
2017 {
2018 if (!solist_update_incremental (info, lm))
2019 action = FULL_RELOAD;
2020 }
2021
2022 if (action == FULL_RELOAD)
2023 {
2024 if (!solist_update_full (info))
2025 {
2026 do_cleanups (old_chain);
2027 return;
2028 }
2029 }
2030
2031 discard_cleanups (old_chain);
2032 }
2033
2034 /* Helper function for svr4_update_solib_event_breakpoints. */
2035
2036 static int
2037 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
2038 {
2039 struct bp_location *loc;
2040
2041 if (b->type != bp_shlib_event)
2042 {
2043 /* Continue iterating. */
2044 return 0;
2045 }
2046
2047 for (loc = b->loc; loc != NULL; loc = loc->next)
2048 {
2049 struct svr4_info *info;
2050 struct probe_and_action *pa;
2051
2052 info = ((struct svr4_info *)
2053 program_space_data (loc->pspace, solib_svr4_pspace_data));
2054 if (info == NULL || info->probes_table == NULL)
2055 continue;
2056
2057 pa = solib_event_probe_at (info, loc->address);
2058 if (pa == NULL)
2059 continue;
2060
2061 if (pa->action == DO_NOTHING)
2062 {
2063 if (b->enable_state == bp_disabled && stop_on_solib_events)
2064 enable_breakpoint (b);
2065 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
2066 disable_breakpoint (b);
2067 }
2068
2069 break;
2070 }
2071
2072 /* Continue iterating. */
2073 return 0;
2074 }
2075
2076 /* Enable or disable optional solib event breakpoints as appropriate.
2077 Called whenever stop_on_solib_events is changed. */
2078
2079 static void
2080 svr4_update_solib_event_breakpoints (void)
2081 {
2082 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2083 }
2084
2085 /* Create and register solib event breakpoints. PROBES is an array
2086 of NUM_PROBES elements, each of which is a vector of probes. A
2087 solib event breakpoint will be created and registered for each
2088 probe. */
2089
2090 static void
2091 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2092 VEC (probe_p) **probes,
2093 struct objfile *objfile)
2094 {
2095 int i;
2096
2097 for (i = 0; i < NUM_PROBES; i++)
2098 {
2099 enum probe_action action = probe_info[i].action;
2100 struct probe *probe;
2101 int ix;
2102
2103 for (ix = 0;
2104 VEC_iterate (probe_p, probes[i], ix, probe);
2105 ++ix)
2106 {
2107 CORE_ADDR address = get_probe_address (probe, objfile);
2108
2109 create_solib_event_breakpoint (gdbarch, address);
2110 register_solib_event_probe (probe, address, action);
2111 }
2112 }
2113
2114 svr4_update_solib_event_breakpoints ();
2115 }
2116
2117 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2118 before and after mapping and unmapping shared libraries. The sole
2119 purpose of this method is to allow debuggers to set a breakpoint so
2120 they can track these changes.
2121
2122 Some versions of the glibc dynamic linker contain named probes
2123 to allow more fine-grained stopping. Given the address of the
2124 original marker function, this function attempts to find these
2125 probes, and if found, sets breakpoints on those instead. If the
2126 probes aren't found, a single breakpoint is set on the original
2127 marker function. */
2128
2129 static void
2130 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2131 CORE_ADDR address)
2132 {
2133 struct obj_section *os;
2134
2135 os = find_pc_section (address);
2136 if (os != NULL)
2137 {
2138 int with_prefix;
2139
2140 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2141 {
2142 VEC (probe_p) *probes[NUM_PROBES];
2143 int all_probes_found = 1;
2144 int checked_can_use_probe_arguments = 0;
2145 int i;
2146
2147 memset (probes, 0, sizeof (probes));
2148 for (i = 0; i < NUM_PROBES; i++)
2149 {
2150 const char *name = probe_info[i].name;
2151 struct probe *p;
2152 char buf[32];
2153
2154 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2155 shipped with an early version of the probes code in
2156 which the probes' names were prefixed with "rtld_"
2157 and the "map_failed" probe did not exist. The
2158 locations of the probes are otherwise the same, so
2159 we check for probes with prefixed names if probes
2160 with unprefixed names are not present. */
2161 if (with_prefix)
2162 {
2163 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2164 name = buf;
2165 }
2166
2167 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2168
2169 /* The "map_failed" probe did not exist in early
2170 versions of the probes code in which the probes'
2171 names were prefixed with "rtld_". */
2172 if (strcmp (name, "rtld_map_failed") == 0)
2173 continue;
2174
2175 if (VEC_empty (probe_p, probes[i]))
2176 {
2177 all_probes_found = 0;
2178 break;
2179 }
2180
2181 /* Ensure probe arguments can be evaluated. */
2182 if (!checked_can_use_probe_arguments)
2183 {
2184 p = VEC_index (probe_p, probes[i], 0);
2185 if (!can_evaluate_probe_arguments (p))
2186 {
2187 all_probes_found = 0;
2188 break;
2189 }
2190 checked_can_use_probe_arguments = 1;
2191 }
2192 }
2193
2194 if (all_probes_found)
2195 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2196
2197 for (i = 0; i < NUM_PROBES; i++)
2198 VEC_free (probe_p, probes[i]);
2199
2200 if (all_probes_found)
2201 return;
2202 }
2203 }
2204
2205 create_solib_event_breakpoint (gdbarch, address);
2206 }
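
/* Illustration of the prefix fallback above: on the WITH_PREFIX pass the
   PROBE_INFO entry "map_failed" is looked up in provider "rtld" under the
   name "rtld_map_failed", and because early versions of the probes code
   lacked that probe, its absence alone does not clear ALL_PROBES_FOUND.  */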
2207
2208 /* Helper function for gdb_bfd_lookup_symbol. */
2209
2210 static int
2211 cmp_name_and_sec_flags (const asymbol *sym, const void *data)
2212 {
2213 return (strcmp (sym->name, (const char *) data) == 0
2214 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2215 }
2216 /* Arrange for dynamic linker to hit breakpoint.
2217
2218 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2219 debugger interface, support for arranging for the inferior to hit
2220 a breakpoint after mapping in the shared libraries. This function
2221 enables that breakpoint.
2222
2223 For SunOS, there is a special flag location (in_debugger) which we
2224 set to 1. When the dynamic linker sees this flag set, it will set
2225 a breakpoint at a location known only to itself, after saving the
2226 original contents of that place and the breakpoint address itself,
2227 in its own internal structures. When we resume the inferior, it
2228 will eventually take a SIGTRAP when it runs into the breakpoint.
2229 We handle this (in a different place) by restoring the contents of
2230 the breakpointed location (which is only known after it stops),
2231 chasing around to locate the shared libraries that have been
2232 loaded, then resuming.
2233
2234 For SVR4, the debugger interface structure contains a member (r_brk)
2235 which is statically initialized at the time the shared library is
2236 built, to the offset of a function (_r_debug_state) which is guaran-
2237 teed to be called once before mapping in a library, and again when
2238 the mapping is complete. At the time we are examining this member,
2239 it contains only the unrelocated offset of the function, so we have
2240 to do our own relocation. Later, when the dynamic linker actually
2241 runs, it relocates r_brk to be the actual address of _r_debug_state().
2242
2243 The debugger interface structure also contains an enumeration which
2244 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2245 depending upon whether or not the library is being mapped or unmapped,
2246 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
2247
2248 static int
2249 enable_break (struct svr4_info *info, int from_tty)
2250 {
2251 struct bound_minimal_symbol msymbol;
2252 const char * const *bkpt_namep;
2253 asection *interp_sect;
2254 char *interp_name;
2255 CORE_ADDR sym_addr;
2256
2257 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2258 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2259
2260 /* If we already have a shared library list in the target, and
2261 r_debug contains r_brk, set the breakpoint there - this should
2262 mean r_brk has already been relocated. Assume the dynamic linker
2263 is the object containing r_brk. */
2264
2265 solib_add (NULL, from_tty, auto_solib_add);
2266 sym_addr = 0;
2267 if (info->debug_base && solib_svr4_r_map (info) != 0)
2268 sym_addr = solib_svr4_r_brk (info);
2269
2270 if (sym_addr != 0)
2271 {
2272 struct obj_section *os;
2273
2274 sym_addr = gdbarch_addr_bits_remove
2275 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2276 sym_addr,
2277 &current_target));
2278
2279 /* On at least some versions of Solaris there's a dynamic relocation
2280 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2281 we get control before the dynamic linker has self-relocated.
2282 Check if SYM_ADDR is in a known section; if it is, assume we can
2283 trust its value. This is just a heuristic, though; it could go away
2284 or be replaced if it's getting in the way.
2285
2286 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2287 however it's spelled in your particular system) is ARM or Thumb.
2288 That knowledge is encoded in the address, if it's Thumb the low bit
2289 is 1. However, we've stripped that info above and it's not clear
2290 what all the consequences are of passing a non-addr_bits_remove'd
2291 address to svr4_create_solib_event_breakpoints. The call to
2292 find_pc_section verifies we know about the address and have some
2293 hope of computing the right kind of breakpoint to use (via
2294 symbol info). It does mean that GDB needs to be pointed at a
2295 non-stripped version of the dynamic linker in order to obtain
2296 information it already knows about. Sigh. */
2297
2298 os = find_pc_section (sym_addr);
2299 if (os != NULL)
2300 {
2301 /* Record the relocated start and end address of the dynamic linker
2302 text and plt section for svr4_in_dynsym_resolve_code. */
2303 bfd *tmp_bfd;
2304 CORE_ADDR load_addr;
2305
2306 tmp_bfd = os->objfile->obfd;
2307 load_addr = ANOFFSET (os->objfile->section_offsets,
2308 SECT_OFF_TEXT (os->objfile));
2309
2310 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2311 if (interp_sect)
2312 {
2313 info->interp_text_sect_low =
2314 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2315 info->interp_text_sect_high =
2316 info->interp_text_sect_low
2317 + bfd_section_size (tmp_bfd, interp_sect);
2318 }
2319 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2320 if (interp_sect)
2321 {
2322 info->interp_plt_sect_low =
2323 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2324 info->interp_plt_sect_high =
2325 info->interp_plt_sect_low
2326 + bfd_section_size (tmp_bfd, interp_sect);
2327 }
2328
2329 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2330 return 1;
2331 }
2332 }
2333
2334 /* Find the program interpreter; if not found, warn the user and drop
2335 into the old breakpoint-at-symbol code. */
2336 interp_name = find_program_interpreter ();
2337 if (interp_name)
2338 {
2339 CORE_ADDR load_addr = 0;
2340 int load_addr_found = 0;
2341 int loader_found_in_list = 0;
2342 struct so_list *so;
2343 struct target_ops *tmp_bfd_target;
2344
2345 sym_addr = 0;
2346
2347 /* Now we need to figure out where the dynamic linker was
2348 loaded so that we can load its symbols and place a breakpoint
2349 in the dynamic linker itself.
2350
2351 This address is stored on the stack. However, I've been unable
2352 to find any magic formula to find it for Solaris (appears to
2353 be trivial on GNU/Linux). Therefore, we have to try an alternate
2354 mechanism to find the dynamic linker's base address. */
2355
2356 gdb_bfd_ref_ptr tmp_bfd;
2357 TRY
2358 {
2359 tmp_bfd = solib_bfd_open (interp_name);
2360 }
2361 CATCH (ex, RETURN_MASK_ALL)
2362 {
2363 }
2364 END_CATCH
2365
2366 if (tmp_bfd == NULL)
2367 goto bkpt_at_symbol;
2368
2369 /* Now convert the TMP_BFD into a target. That way target operations,
2370 as well as BFD operations, can be used. target_bfd_reopen
2371 acquires its own reference. */
2372 tmp_bfd_target = target_bfd_reopen (tmp_bfd.get ());
2373
2374 /* On a running target, we can get the dynamic linker's base
2375 address from the shared library table. */
2376 so = master_so_list ();
2377 while (so)
2378 {
2379 if (svr4_same_1 (interp_name, so->so_original_name))
2380 {
2381 load_addr_found = 1;
2382 loader_found_in_list = 1;
2383 load_addr = lm_addr_check (so, tmp_bfd.get ());
2384 break;
2385 }
2386 so = so->next;
2387 }
2388
2389 /* If we were not able to find the base address of the loader
2390 from our so_list, then try using the AT_BASE auxiliary entry. */
2391 if (!load_addr_found)
2392 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2393 {
2394 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2395
2396 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
2397 that `+ load_addr' will overflow and wrap around the CORE_ADDR width
2398 instead of creating invalid addresses like 0x101234567 for 32-bit
2399 inferiors on 64-bit GDB. */
2400
2401 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2402 {
2403 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2404 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd.get (),
2405 tmp_bfd_target);
2406
2407 gdb_assert (load_addr < space_size);
2408
2409 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
2410 64-bit ld.so with a 32-bit executable; it should not happen. */
2411
2412 if (tmp_entry_point < space_size
2413 && tmp_entry_point + load_addr >= space_size)
2414 load_addr -= space_size;
2415 }
2416
2417 load_addr_found = 1;
2418 }
2419
2420 /* Otherwise we find the dynamic linker's base address by examining
2421 the current pc (which should point at the entry point for the
2422 dynamic linker) and subtracting the offset of the entry point.
2423
2424 This is more fragile than the previous approaches, but is a good
2425 fallback method because it has actually been working well in
2426 most cases. */
2427 if (!load_addr_found)
2428 {
2429 struct regcache *regcache
2430 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2431
2432 load_addr = (regcache_read_pc (regcache)
2433 - exec_entry_point (tmp_bfd.get (), tmp_bfd_target));
2434 }
2435
2436 if (!loader_found_in_list)
2437 {
2438 info->debug_loader_name = xstrdup (interp_name);
2439 info->debug_loader_offset_p = 1;
2440 info->debug_loader_offset = load_addr;
2441 solib_add (NULL, from_tty, auto_solib_add);
2442 }
2443
2444 /* Record the relocated start and end address of the dynamic linker
2445 text and plt section for svr4_in_dynsym_resolve_code. */
2446 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".text");
2447 if (interp_sect)
2448 {
2449 info->interp_text_sect_low =
2450 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2451 info->interp_text_sect_high =
2452 info->interp_text_sect_low
2453 + bfd_section_size (tmp_bfd.get (), interp_sect);
2454 }
2455 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".plt");
2456 if (interp_sect)
2457 {
2458 info->interp_plt_sect_low =
2459 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2460 info->interp_plt_sect_high =
2461 info->interp_plt_sect_low
2462 + bfd_section_size (tmp_bfd.get (), interp_sect);
2463 }
2464
2465 /* Now try to set a breakpoint in the dynamic linker. */
2466 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2467 {
2468 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd.get (),
2469 cmp_name_and_sec_flags,
2470 *bkpt_namep);
2471 if (sym_addr != 0)
2472 break;
2473 }
2474
2475 if (sym_addr != 0)
2476 /* Convert 'sym_addr' from a function pointer to an address.
2477 Because we pass tmp_bfd_target instead of the current
2478 target, this will always produce an unrelocated value. */
2479 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2480 sym_addr,
2481 tmp_bfd_target);
2482
2483 /* We're done with both the temporary bfd and target. Closing
2484 the target closes the underlying bfd, because it holds the
2485 only remaining reference. */
2486 target_close (tmp_bfd_target);
2487
2488 if (sym_addr != 0)
2489 {
2490 svr4_create_solib_event_breakpoints (target_gdbarch (),
2491 load_addr + sym_addr);
2492 xfree (interp_name);
2493 return 1;
2494 }
2495
2496 /* For whatever reason we couldn't set a breakpoint in the dynamic
2497 linker. Warn and drop into the old code. */
2498 bkpt_at_symbol:
2499 xfree (interp_name);
2500 warning (_("Unable to find dynamic linker breakpoint function.\n"
2501 "GDB will be unable to debug shared library initializers\n"
2502 "and track explicitly loaded dynamic code."));
2503 }
2504
2505 /* Scan through the lists of symbols, trying to look up the symbol and
2506 set a breakpoint there. Terminate the loop if/when we succeed. */
2507
2508 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2509 {
2510 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2511 if ((msymbol.minsym != NULL)
2512 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2513 {
2514 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2515 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2516 sym_addr,
2517 &current_target);
2518 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2519 return 1;
2520 }
2521 }
2522
2523 if (interp_name != NULL && !current_inferior ()->attach_flag)
2524 {
2525 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2526 {
2527 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2528 if ((msymbol.minsym != NULL)
2529 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2530 {
2531 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2532 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2533 sym_addr,
2534 &current_target);
2535 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2536 return 1;
2537 }
2538 }
2539 }
2540 return 0;
2541 }
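
/* For reference, a condensed sketch of the debugger interface structure
   described in the comment before enable_break, mirroring the layout that
   glibc's <link.h> declares for `struct r_debug'.  The type below uses a
   hypothetical name and GDB-side field types for illustration only; GDB
   reads the real structure field by field through the offset tables
   returned by svr4_fetch_link_map_offsets.  */

struct svr4_r_debug_sketch
{
  int version;		/* Interface version, currently 1.  */
  CORE_ADDR map;	/* Head of the chain of loaded objects
			   (`r_map', a struct link_map pointer).  */
  CORE_ADDR brk;	/* `r_brk': address of the marker function
			   (_dl_debug_state and friends).  */
  int state;		/* RT_CONSISTENT, RT_ADD or RT_DELETE.  */
  CORE_ADDR ldbase;	/* Base address of the dynamic linker.  */
};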
2542
2543 /* Read the ELF program headers from ABFD. Return the contents and
2544 set *PHDRS_SIZE to the size of the program headers. */
2545
2546 static gdb_byte *
2547 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2548 {
2549 Elf_Internal_Ehdr *ehdr;
2550 gdb_byte *buf;
2551
2552 ehdr = elf_elfheader (abfd);
2553
2554 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2555 if (*phdrs_size == 0)
2556 return NULL;
2557
2558 buf = (gdb_byte *) xmalloc (*phdrs_size);
2559 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2560 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2561 {
2562 xfree (buf);
2563 return NULL;
2564 }
2565
2566 return buf;
2567 }
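
/* Worked example with typical x86-64 values: e_phnum == 13 program headers
   of e_phentsize == 56 bytes each give *PHDRS_SIZE == 728, and the whole
   table is read in one bfd_bread call from file offset e_phoff.  */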
2568
2569 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
2570 inferior's exec_bfd. Otherwise return 0.
2571
2572 We relocate all of the sections by the same amount. This
2573 behavior is mandated by recent editions of the System V ABI.
2574 According to the System V Application Binary Interface,
2575 Edition 4.1, page 5-5:
2576
2577 ... Though the system chooses virtual addresses for
2578 individual processes, it maintains the segments' relative
2579 positions. Because position-independent code uses relative
2580 addressing between segments, the difference between
2581 virtual addresses in memory must match the difference
2582 between virtual addresses in the file. The difference
2583 between the virtual address of any segment in memory and
2584 the corresponding virtual address in the file is thus a
2585 single constant value for any one executable or shared
2586 object in a given process. This difference is the base
2587 address. One use of the base address is to relocate the
2588 memory image of the program during dynamic linking.
2589
2590 The same language also appears in Edition 4.0 of the System V
2591 ABI and is left unspecified in some of the earlier editions.
2592
2593 Decide if the objfile needs to be relocated. As indicated above, we will
2594 only be here when execution is stopped. But during attachment the PC can be
2595 at an arbitrary address, so regcache_read_pc can be misleading (contrary to
2596 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
2597 section, regcache_read_pc would point to the interpreter, not the main executable.
2598
2599 So, to summarize, relocations are necessary when the start address obtained
2600 from the executable is different from the address in auxv AT_ENTRY entry.
2601
2602 [ The astute reader will note that we also test to make sure that
2603 the executable in question has the DYNAMIC flag set. It is my
2604 opinion that this test is unnecessary (undesirable even). It
2605 was added to avoid inadvertent relocation of an executable
2606 whose e_type member in the ELF header is not ET_DYN. There may
2607 be a time in the future when it is desirable to do relocations
2608 on other types of files as well in which case this condition
2609 should either be removed or modified to accommodate the new file
2610 type. - Kevin, Nov 2000. ] */
2611
2612 static int
2613 svr4_exec_displacement (CORE_ADDR *displacementp)
2614 {
2615 /* ENTRY_POINT is a possible function descriptor - before
2616 a call to gdbarch_convert_from_func_ptr_addr. */
2617 CORE_ADDR entry_point, exec_displacement;
2618
2619 if (exec_bfd == NULL)
2620 return 0;
2621
2622 /* For ELF, a file without the DYNAMIC flag is ET_EXEC and not ET_DYN. Both
2623 shared libraries being executed themselves and PIE (Position Independent
2624 Executable) executables are ET_DYN. */
2625
2626 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2627 return 0;
2628
2629 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2630 return 0;
2631
2632 exec_displacement = entry_point - bfd_get_start_address (exec_bfd);
2633
2634 /* Verify the EXEC_DISPLACEMENT candidate complies with the required page
2635 alignment. It is cheaper than the program headers comparison below. */
2636
2637 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2638 {
2639 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2640
2641 /* p_align of PT_LOAD segments does not specify any alignment but
2642 only congruency of addresses:
2643 p_offset % p_align == p_vaddr % p_align
2644 The kernel is free to load the executable with lower alignment. */
2645
2646 if ((exec_displacement & (elf->minpagesize - 1)) != 0)
2647 return 0;
2648 }
2649
2650 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2651 comparing their program headers. If the program headers in the auxiliary
2652 vector do not match the program headers in the executable, then we are
2653 looking at a different file than the one used by the kernel - for
2654 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2655
2656 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2657 {
2658 /* Be optimistic and clear OK only if GDB was able to verify the headers
2659 really do not match. */
2660 int phdrs_size, phdrs2_size, ok = 1;
2661 gdb_byte *buf, *buf2;
2662 int arch_size;
2663
2664 buf = read_program_header (-1, &phdrs_size, &arch_size, NULL);
2665 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2666 if (buf != NULL && buf2 != NULL)
2667 {
2668 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2669
2670 /* We are dealing with three different addresses. EXEC_BFD
2671 represents the current addresses in the on-disk file. The target
2672 memory content may differ from EXEC_BFD, as the file may have been
2673 prelinked to a different address after the executable was loaded.
2674 Moreover the address of placement in target memory can be
2675 different from what the program headers in target memory say -
2676 this is the goal of PIE.
2677
2678 Detected DISPLACEMENT covers both the offsets of PIE placement and
2679 possible new prelink performed after start of the program. Here
2680 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2681 content offset for the verification purpose. */
2682
2683 if (phdrs_size != phdrs2_size
2684 || bfd_get_arch_size (exec_bfd) != arch_size)
2685 ok = 0;
2686 else if (arch_size == 32
2687 && phdrs_size >= sizeof (Elf32_External_Phdr)
2688 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2689 {
2690 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2691 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2692 CORE_ADDR displacement = 0;
2693 int i;
2694
2695 /* DISPLACEMENT could be found more easily by the difference of
2696 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2697 already have enough information to compute that displacement
2698 with what we've read. */
2699
2700 for (i = 0; i < ehdr2->e_phnum; i++)
2701 if (phdr2[i].p_type == PT_LOAD)
2702 {
2703 Elf32_External_Phdr *phdrp;
2704 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2705 CORE_ADDR vaddr, paddr;
2706 CORE_ADDR displacement_vaddr = 0;
2707 CORE_ADDR displacement_paddr = 0;
2708
2709 phdrp = &((Elf32_External_Phdr *) buf)[i];
2710 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2711 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2712
2713 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2714 byte_order);
2715 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2716
2717 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2718 byte_order);
2719 displacement_paddr = paddr - phdr2[i].p_paddr;
2720
2721 if (displacement_vaddr == displacement_paddr)
2722 displacement = displacement_vaddr;
2723
2724 break;
2725 }
2726
2727 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2728
2729 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2730 {
2731 Elf32_External_Phdr *phdrp;
2732 Elf32_External_Phdr *phdr2p;
2733 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2734 CORE_ADDR vaddr, paddr;
2735 asection *plt2_asect;
2736
2737 phdrp = &((Elf32_External_Phdr *) buf)[i];
2738 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2739 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2740 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2741
2742 /* PT_GNU_STACK is an exception: it is never relocated by
2743 prelink, as its addresses are always zero. */
2744
2745 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2746 continue;
2747
2748 /* Check also other adjustment combinations - PR 11786. */
2749
2750 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2751 byte_order);
2752 vaddr -= displacement;
2753 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2754
2755 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2756 byte_order);
2757 paddr -= displacement;
2758 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2759
2760 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2761 continue;
2762
2763 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2764 CentOS-5 has problems with filesz, memsz as well.
2765 See PR 11786. */
2766 if (phdr2[i].p_type == PT_GNU_RELRO)
2767 {
2768 Elf32_External_Phdr tmp_phdr = *phdrp;
2769 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2770
2771 memset (tmp_phdr.p_filesz, 0, 4);
2772 memset (tmp_phdr.p_memsz, 0, 4);
2773 memset (tmp_phdr.p_flags, 0, 4);
2774 memset (tmp_phdr.p_align, 0, 4);
2775 memset (tmp_phdr2.p_filesz, 0, 4);
2776 memset (tmp_phdr2.p_memsz, 0, 4);
2777 memset (tmp_phdr2.p_flags, 0, 4);
2778 memset (tmp_phdr2.p_align, 0, 4);
2779
2780 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2781 == 0)
2782 continue;
2783 }
2784
2785 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2786 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2787 if (plt2_asect)
2788 {
2789 int content2;
2790 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2791 CORE_ADDR filesz;
2792
2793 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2794 & SEC_HAS_CONTENTS) != 0;
2795
2796 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2797 byte_order);
2798
2799 /* PLT2_ASECT is from the on-disk file (exec_bfd) while
2800 FILESZ is from the in-memory image. */
2801 if (content2)
2802 filesz += bfd_get_section_size (plt2_asect);
2803 else
2804 filesz -= bfd_get_section_size (plt2_asect);
2805
2806 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2807 filesz);
2808
2809 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2810 continue;
2811 }
2812
2813 ok = 0;
2814 break;
2815 }
2816 }
2817 else if (arch_size == 64
2818 && phdrs_size >= sizeof (Elf64_External_Phdr)
2819 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2820 {
2821 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2822 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2823 CORE_ADDR displacement = 0;
2824 int i;
2825
2826 /* DISPLACEMENT could be found more easily by the difference of
2827 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2828 already have enough information to compute that displacement
2829 with what we've read. */
2830
2831 for (i = 0; i < ehdr2->e_phnum; i++)
2832 if (phdr2[i].p_type == PT_LOAD)
2833 {
2834 Elf64_External_Phdr *phdrp;
2835 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2836 CORE_ADDR vaddr, paddr;
2837 CORE_ADDR displacement_vaddr = 0;
2838 CORE_ADDR displacement_paddr = 0;
2839
2840 phdrp = &((Elf64_External_Phdr *) buf)[i];
2841 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2842 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2843
2844 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2845 byte_order);
2846 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2847
2848 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2849 byte_order);
2850 displacement_paddr = paddr - phdr2[i].p_paddr;
2851
2852 if (displacement_vaddr == displacement_paddr)
2853 displacement = displacement_vaddr;
2854
2855 break;
2856 }
2857
2858 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2859
2860 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2861 {
2862 Elf64_External_Phdr *phdrp;
2863 Elf64_External_Phdr *phdr2p;
2864 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2865 CORE_ADDR vaddr, paddr;
2866 asection *plt2_asect;
2867
2868 phdrp = &((Elf64_External_Phdr *) buf)[i];
2869 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2870 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2871 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2872
2873 /* PT_GNU_STACK is an exception: it is never relocated by
2874 prelink, as its addresses are always zero. */
2875
2876 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2877 continue;
2878
2879 /* Check also other adjustment combinations - PR 11786. */
2880
2881 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2882 byte_order);
2883 vaddr -= displacement;
2884 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2885
2886 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2887 byte_order);
2888 paddr -= displacement;
2889 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2890
2891 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2892 continue;
2893
2894 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2895 CentOS-5 has problems with filesz, memsz as well.
2896 See PR 11786. */
2897 if (phdr2[i].p_type == PT_GNU_RELRO)
2898 {
2899 Elf64_External_Phdr tmp_phdr = *phdrp;
2900 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2901
2902 memset (tmp_phdr.p_filesz, 0, 8);
2903 memset (tmp_phdr.p_memsz, 0, 8);
2904 memset (tmp_phdr.p_flags, 0, 4);
2905 memset (tmp_phdr.p_align, 0, 8);
2906 memset (tmp_phdr2.p_filesz, 0, 8);
2907 memset (tmp_phdr2.p_memsz, 0, 8);
2908 memset (tmp_phdr2.p_flags, 0, 4);
2909 memset (tmp_phdr2.p_align, 0, 8);
2910
2911 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2912 == 0)
2913 continue;
2914 }
2915
2916 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2917 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2918 if (plt2_asect)
2919 {
2920 int content2;
2921 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2922 CORE_ADDR filesz;
2923
2924 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2925 & SEC_HAS_CONTENTS) != 0;
2926
2927 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2928 byte_order);
2929
2930 /* PLT2_ASECT is from the on-disk file (exec_bfd) while
2931 FILESZ is from the in-memory image. */
2932 if (content2)
2933 filesz += bfd_get_section_size (plt2_asect);
2934 else
2935 filesz -= bfd_get_section_size (plt2_asect);
2936
2937 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2938 filesz);
2939
2940 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2941 continue;
2942 }
2943
2944 ok = 0;
2945 break;
2946 }
2947 }
2948 else
2949 ok = 0;
2950 }
2951
2952 xfree (buf);
2953 xfree (buf2);
2954
2955 if (!ok)
2956 return 0;
2957 }
2958
2959 if (info_verbose)
2960 {
2961 /* This may be printed repeatedly, as there is no easy way to check
2962 whether the executable symbols/file have already been relocated by
2963 the displacement. */
2964
2965 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2966 "displacement %s for \"%s\".\n"),
2967 paddress (target_gdbarch (), exec_displacement),
2968 bfd_get_filename (exec_bfd));
2969 }
2970
2971 *displacementp = exec_displacement;
2972 return 1;
2973 }
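
/* Worked example with hypothetical addresses: for a PIE whose ELF header
   records e_entry == 0x1040 while the auxiliary vector reports
   AT_ENTRY == 0x555555555040, the code above computes

     exec_displacement = 0x555555555040 - 0x1040 = 0x555555554000

   which is page-aligned; if the program headers also match, every section
   of the main executable is then shifted by that single constant.  */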
2974
2975 /* Relocate the main executable. This function should be called upon
2976 stopping the inferior process at the entry point to the program.
2977 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2978 different, the main executable is relocated by the proper amount. */
2979
2980 static void
2981 svr4_relocate_main_executable (void)
2982 {
2983 CORE_ADDR displacement;
2984
2985 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2986 probably contains the offsets computed using the PIE displacement
2987 from the previous run, which of course are irrelevant for this run.
2988 So we need to determine the new PIE displacement and recompute the
2989 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2990 already contains pre-computed offsets.
2991
2992 If we cannot compute the PIE displacement, either:
2993
2994 - The executable is not PIE.
2995
2996 - SYMFILE_OBJFILE does not match the executable started in the target.
2997 This can happen for main executable symbols loaded at the host while
2998 `ld.so --ld-args main-executable' is loaded in the target.
2999
3000 Then we leave the section offsets untouched and use them as is for
3001 this run. Either:
3002
3003 - These section offsets were properly reset earlier, and thus
3004 already contain the correct values. This can happen for instance
3005 when reconnecting via the remote protocol to a target that supports
3006 the `qOffsets' packet.
3007
3008 - The section offsets were not reset earlier, and the best we can
3009 hope is that the old offsets are still applicable to the new run. */
3010
3011 if (! svr4_exec_displacement (&displacement))
3012 return;
3013
3014 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
3015 addresses. */
3016
3017 if (symfile_objfile)
3018 {
3019 struct section_offsets *new_offsets;
3020 int i;
3021
3022 new_offsets = XALLOCAVEC (struct section_offsets,
3023 symfile_objfile->num_sections);
3024
3025 for (i = 0; i < symfile_objfile->num_sections; i++)
3026 new_offsets->offsets[i] = displacement;
3027
3028 objfile_relocate (symfile_objfile, new_offsets);
3029 }
3030 else if (exec_bfd)
3031 {
3032 asection *asect;
3033
3034 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
3035 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
3036 (bfd_section_vma (exec_bfd, asect)
3037 + displacement));
3038 }
3039 }
3040
3041 /* Implement the "create_inferior_hook" target_solib_ops method.
3042
3043 For SVR4 executables, the inferior's first instruction is either the first
3044 instruction in the dynamic linker (for dynamically linked
3045 executables) or the instruction at "start" for statically linked
3046 executables. For dynamically linked executables, the system
3047 first exec's /lib/libc.so.N, which contains the dynamic linker,
3048 and starts it running. The dynamic linker maps in any needed
3049 shared libraries, maps in the actual user executable, and then
3050 jumps to "start" in the user executable.
3051
3052 We can arrange to cooperate with the dynamic linker to discover the
3053 names of shared libraries that are dynamically linked, and the base
3054 addresses to which they are linked.
3055
3056 This function is responsible for discovering those names and
3057 addresses, and saving sufficient information about them to allow
3058 their symbols to be read at a later time. */
3059
3060 static void
3061 svr4_solib_create_inferior_hook (int from_tty)
3062 {
3063 struct svr4_info *info;
3064
3065 info = get_svr4_info ();
3066
3067 /* Clear the probes-based interface's state. */
3068 free_probes_table (info);
3069 free_solib_list (info);
3070
3071 /* Relocate the main executable if necessary. */
3072 svr4_relocate_main_executable ();
3073
3074 /* No point setting a breakpoint in the dynamic linker if we can't
3075 hit it (e.g., a core file, or a trace file). */
3076 if (!target_has_execution)
3077 return;
3078
3079 if (!svr4_have_link_map_offsets ())
3080 return;
3081
3082 if (!enable_break (info, from_tty))
3083 return;
3084 }
3085
3086 static void
3087 svr4_clear_solib (void)
3088 {
3089 struct svr4_info *info;
3090
3091 info = get_svr4_info ();
3092 info->debug_base = 0;
3093 info->debug_loader_offset_p = 0;
3094 info->debug_loader_offset = 0;
3095 xfree (info->debug_loader_name);
3096 info->debug_loader_name = NULL;
3097 }
3098
3099 /* Clear any bits of ADDR that wouldn't fit in a target-format
3100 data pointer. "Data pointer" here refers to whatever sort of
3101 address the dynamic linker uses to manage its sections. At the
3102 moment, we don't support shared libraries on any processors where
3103 code and data pointers are different sizes.
3104
3105 This isn't really the right solution. What we really need here is
3106 a way to do arithmetic on CORE_ADDR values that respects the
3107 natural pointer/address correspondence. (For example, on the MIPS,
3108 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3109 sign-extend the value. There, simply truncating the bits above
3110 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3111 be a new gdbarch method or something. */
3112 static CORE_ADDR
3113 svr4_truncate_ptr (CORE_ADDR addr)
3114 {
3115 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3116 /* We don't need to truncate anything, and the bit twiddling below
3117 will fail due to overflow problems. */
3118 return addr;
3119 else
3120 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3121 }
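
/* Example: with gdbarch_ptr_bit () == 32 in a GDB whose CORE_ADDR is
   64 bits wide, an overflowed sum such as 0x100401000 is masked down to
   0x00401000; when the pointer width equals the CORE_ADDR width the
   address is returned unchanged.  */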
3122
3123
3124 static void
3125 svr4_relocate_section_addresses (struct so_list *so,
3126 struct target_section *sec)
3127 {
3128 bfd *abfd = sec->the_bfd_section->owner;
3129
3130 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3131 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3132 }
3133 \f
3134
3135 /* Architecture-specific operations. */
3136
3137 /* Per-architecture data key. */
3138 static struct gdbarch_data *solib_svr4_data;
3139
3140 struct solib_svr4_ops
3141 {
3142 /* Return a description of the layout of `struct link_map'. */
3143 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3144 };
3145
3146 /* Return a default for the architecture-specific operations. */
3147
3148 static void *
3149 solib_svr4_init (struct obstack *obstack)
3150 {
3151 struct solib_svr4_ops *ops;
3152
3153 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3154 ops->fetch_link_map_offsets = NULL;
3155 return ops;
3156 }
3157
3158 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3159 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3160
3161 void
3162 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3163 struct link_map_offsets *(*flmo) (void))
3164 {
3165 struct solib_svr4_ops *ops
3166 = (struct solib_svr4_ops *) gdbarch_data (gdbarch, solib_svr4_data);
3167
3168 ops->fetch_link_map_offsets = flmo;
3169
3170 set_solib_ops (gdbarch, &svr4_so_ops);
3171 }
3172
3173 /* Fetch a link_map_offsets structure using the architecture-specific
3174 `struct link_map_offsets' fetcher. */
3175
3176 static struct link_map_offsets *
3177 svr4_fetch_link_map_offsets (void)
3178 {
3179 struct solib_svr4_ops *ops
3180 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3181 solib_svr4_data);
3182
3183 gdb_assert (ops->fetch_link_map_offsets);
3184 return ops->fetch_link_map_offsets ();
3185 }
3186
3187 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3188
3189 static int
3190 svr4_have_link_map_offsets (void)
3191 {
3192 struct solib_svr4_ops *ops
3193 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3194 solib_svr4_data);
3195
3196 return (ops->fetch_link_map_offsets != NULL);
3197 }
3198 \f
3199
3200 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
3201 `struct r_debug' and a `struct link_map' that are binary compatible
3202 with the original SVR4 implementation. */
3203
3204 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3205 for an ILP32 SVR4 system. */
3206
3207 struct link_map_offsets *
3208 svr4_ilp32_fetch_link_map_offsets (void)
3209 {
3210 static struct link_map_offsets lmo;
3211 static struct link_map_offsets *lmp = NULL;
3212
3213 if (lmp == NULL)
3214 {
3215 lmp = &lmo;
3216
3217 lmo.r_version_offset = 0;
3218 lmo.r_version_size = 4;
3219 lmo.r_map_offset = 4;
3220 lmo.r_brk_offset = 8;
3221 lmo.r_ldsomap_offset = 20;
3222
3223 /* Everything we need is in the first 20 bytes. */
3224 lmo.link_map_size = 20;
3225 lmo.l_addr_offset = 0;
3226 lmo.l_name_offset = 4;
3227 lmo.l_ld_offset = 8;
3228 lmo.l_next_offset = 12;
3229 lmo.l_prev_offset = 16;
3230 }
3231
3232 return lmp;
3233 }
3234
3235 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3236 for an LP64 SVR4 system. */
3237
3238 struct link_map_offsets *
3239 svr4_lp64_fetch_link_map_offsets (void)
3240 {
3241 static struct link_map_offsets lmo;
3242 static struct link_map_offsets *lmp = NULL;
3243
3244 if (lmp == NULL)
3245 {
3246 lmp = &lmo;
3247
3248 lmo.r_version_offset = 0;
3249 lmo.r_version_size = 4;
3250 lmo.r_map_offset = 8;
3251 lmo.r_brk_offset = 16;
3252 lmo.r_ldsomap_offset = 40;
3253
3254 /* Everything we need is in the first 40 bytes. */
3255 lmo.link_map_size = 40;
3256 lmo.l_addr_offset = 0;
3257 lmo.l_name_offset = 8;
3258 lmo.l_ld_offset = 16;
3259 lmo.l_next_offset = 24;
3260 lmo.l_prev_offset = 32;
3261 }
3262
3263 return lmp;
3264 }
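
/* The two offset tables above follow the traditional SVR4/glibc layout of
   `struct link_map'.  A condensed sketch of what those offsets index into,
   using a hypothetical type name and GDB-side field types for illustration:  */

struct svr4_link_map_sketch
{
  CORE_ADDR l_addr;	/* Difference between the addresses in the
			   ELF file and the addresses in memory.  */
  CORE_ADDR l_name;	/* Pointer to the absolute file name.  */
  CORE_ADDR l_ld;	/* Pointer to the object's dynamic section
			   in memory.  */
  CORE_ADDR l_next;	/* Next entry in the chain.  */
  CORE_ADDR l_prev;	/* Previous entry in the chain.  */
};

/* With 4-byte fields these land at offsets 0, 4, 8, 12 and 16 (the ILP32
   table above); with 8-byte fields at 0, 8, 16, 24 and 32 (the LP64 table).  */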
3265 \f
3266
3267 struct target_so_ops svr4_so_ops;
3268
3269 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3270 different rule for symbol lookup. The lookup begins here in the DSO, not in
3271 the main executable. */
3272
3273 static struct block_symbol
3274 elf_lookup_lib_symbol (struct objfile *objfile,
3275 const char *name,
3276 const domain_enum domain)
3277 {
3278 bfd *abfd;
3279
3280 if (objfile == symfile_objfile)
3281 abfd = exec_bfd;
3282 else
3283 {
3284 /* OBJFILE should have been passed as the non-debug one. */
3285 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3286
3287 abfd = objfile->obfd;
3288 }
3289
3290 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL, NULL) != 1)
3291 return (struct block_symbol) {NULL, NULL};
3292
3293 return lookup_global_symbol_from_objfile (objfile, name, domain);
3294 }
3295
3296 void
3297 _initialize_svr4_solib (void)
3298 {
3299 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3300 solib_svr4_pspace_data
3301 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3302
3303 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3304 svr4_so_ops.free_so = svr4_free_so;
3305 svr4_so_ops.clear_so = svr4_clear_so;
3306 svr4_so_ops.clear_solib = svr4_clear_solib;
3307 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3308 svr4_so_ops.current_sos = svr4_current_sos;
3309 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3310 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3311 svr4_so_ops.bfd_open = solib_bfd_open;
3312 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3313 svr4_so_ops.same = svr4_same;
3314 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3315 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3316 svr4_so_ops.handle_event = svr4_handle_solib_event;
3317 }