1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
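
/* For illustration only: the fields above mirror the inferior's
   `struct link_map' as maintained by its dynamic linker.  A minimal
   sketch of the typical glibc-style layout (an assumption for the
   reader's benefit, not something this file relies on):

     struct link_map
     {
       ElfW(Addr) l_addr;
       char *l_name;
       ElfW(Dyn) *l_ld;
       struct link_map *l_next, *l_prev;
     };

   where l_addr is the load displacement of the object, l_name its
   absolute pathname, l_ld the address of its .dynamic section, and
   l_next/l_prev the doubly linked chain of loaded objects.
   lm_info_read below extracts these fields from target memory using
   the offsets supplied by svr4_fetch_link_map_offsets, so no such
   declaration is hard-coded here.  */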
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* What to do when a probe stop occurs. */
110
111 enum probe_action
112 {
113 /* Something went seriously wrong. Stop using probes and
114 revert to using the older interface. */
115 PROBES_INTERFACE_FAILED,
116
117 /* No action is required. The shared object list is still
118 valid. */
119 DO_NOTHING,
120
121 /* The shared object list should be reloaded entirely. */
122 FULL_RELOAD,
123
124 /* Attempt to incrementally update the shared object list. If
125 the update fails or is not possible, fall back to reloading
126 the list in full. */
127 UPDATE_OR_RELOAD,
128 };
129
130 /* A probe's name and its associated action. */
131
132 struct probe_info
133 {
134 /* The name of the probe. */
135 const char *name;
136
137 /* What to do when a probe stop occurs. */
138 enum probe_action action;
139 };
140
141 /* A list of named probes and their associated actions. If all
142 probes are present in the dynamic linker then the probes-based
143 interface will be used. */
144
145 static const struct probe_info probe_info[] =
146 {
147 { "init_start", DO_NOTHING },
148 { "init_complete", FULL_RELOAD },
149 { "map_start", DO_NOTHING },
150 { "map_failed", DO_NOTHING },
151 { "reloc_complete", UPDATE_OR_RELOAD },
152 { "unmap_start", DO_NOTHING },
153 { "unmap_complete", FULL_RELOAD },
154 };
155
156 #define NUM_PROBES ARRAY_SIZE (probe_info)
157
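/* In GNU/Linux's glibc these names correspond to static probe points
   compiled into the dynamic linker (SystemTap-style SDT probes,
   typically under the "rtld" provider); this is an observation about
   common targets, not a requirement imposed here.  GDB looks the names
   up in the linker's objfile and places breakpoints at the probe
   addresses; see register_solib_event_probe and solib_event_probe_at
   below.  */
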
158 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
159 the same shared library. */
160
161 static int
162 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
163 {
164 if (strcmp (gdb_so_name, inferior_so_name) == 0)
165 return 1;
166
167 /* On Solaris, when starting an inferior we think that the dynamic linker
168 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
169 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
170 sometimes they have identical content but are not linked to each
171 other. We don't restrict this check to Solaris, but the chances
172 of running into this situation elsewhere are very low. */
173 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
174 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
175 return 1;
176
177 /* Similarly, we observed the same issue with sparc64, but with
178 different locations. */
179 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
180 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
181 return 1;
182
183 return 0;
184 }
185
186 static int
187 svr4_same (struct so_list *gdb, struct so_list *inferior)
188 {
189 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
190 }
191
192 static struct lm_info *
193 lm_info_read (CORE_ADDR lm_addr)
194 {
195 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
196 gdb_byte *lm;
197 struct lm_info *lm_info;
198 struct cleanup *back_to;
199
200 lm = xmalloc (lmo->link_map_size);
201 back_to = make_cleanup (xfree, lm);
202
203 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
204 {
205 warning (_("Error reading shared library list entry at %s"),
206 paddress (target_gdbarch (), lm_addr)),
207 lm_info = NULL;
208 }
209 else
210 {
211 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
212
213 lm_info = xzalloc (sizeof (*lm_info));
214 lm_info->lm_addr = lm_addr;
215
216 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
217 ptr_type);
218 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
219 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
220 ptr_type);
221 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
222 ptr_type);
223 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
224 ptr_type);
225 }
226
227 do_cleanups (back_to);
228
229 return lm_info;
230 }
231
232 static int
233 has_lm_dynamic_from_link_map (void)
234 {
235 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
236
237 return lmo->l_ld_offset >= 0;
238 }
239
240 static CORE_ADDR
241 lm_addr_check (const struct so_list *so, bfd *abfd)
242 {
243 if (!so->lm_info->l_addr_p)
244 {
245 struct bfd_section *dyninfo_sect;
246 CORE_ADDR l_addr, l_dynaddr, dynaddr;
247
248 l_addr = so->lm_info->l_addr_inferior;
249
250 if (! abfd || ! has_lm_dynamic_from_link_map ())
251 goto set_addr;
252
253 l_dynaddr = so->lm_info->l_ld;
254
255 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
256 if (dyninfo_sect == NULL)
257 goto set_addr;
258
259 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
260
261 if (dynaddr + l_addr != l_dynaddr)
262 {
263 CORE_ADDR align = 0x1000;
264 CORE_ADDR minpagesize = align;
265
266 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
267 {
268 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
269 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
270 int i;
271
272 align = 1;
273
274 for (i = 0; i < ehdr->e_phnum; i++)
275 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
276 align = phdr[i].p_align;
277
278 minpagesize = get_elf_backend_data (abfd)->minpagesize;
279 }
280
281 /* Turn it into a mask. */
282 align--;
283
284 /* If the changes match the alignment requirements, we
285 assume we're using a core file that was generated by the
286 same binary, just prelinked with a different base offset.
287 If it doesn't match, we may have a different binary, the
288 same binary with the dynamic table loaded at an unrelated
289 location, or anything, really. To avoid regressions,
290 don't adjust the base offset in the latter case, although
291 odds are that, if things really changed, debugging won't
292 quite work.
293
294 One might instead expect the condition
295 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
296 but the one below is relaxed for PPC. The PPC kernel supports
297 either 4k or 64k page sizes. To be prepared for 64k pages,
298 PPC ELF files are built using an alignment requirement of 64k.
299 However, when running on a kernel supporting 4k pages, the memory
300 mapping of the library may not actually happen on a 64k boundary!
301
302 (In the usual case where (l_addr & align) == 0, this check is
303 equivalent to the possibly expected check above.)
304
305 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
306
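/* A made-up example, assuming a 4k MINPAGESIZE: if the file's .dynamic
   section has dynaddr == 0x1f660 and the inferior reports
   l_dynaddr == 0x3fffb7d2f660, then the recomputed displacement is
   l_addr == 0x3fffb7d10000, which is 4k-aligned and is therefore
   accepted by the check below.  */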
307 l_addr = l_dynaddr - dynaddr;
308
309 if ((l_addr & (minpagesize - 1)) == 0
310 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
311 {
312 if (info_verbose)
313 printf_unfiltered (_("Using PIC (Position Independent Code) "
314 "prelink displacement %s for \"%s\".\n"),
315 paddress (target_gdbarch (), l_addr),
316 so->so_name);
317 }
318 else
319 {
320 /* There is no way to verify that the library file matches. During
321 prelinking of an unprelinked file (or unprelinking of a prelinked
322 file), prelink can shift the DYNAMIC segment by an arbitrary
323 offset without any page size alignment. There is no way to
324 read the ELF header and/or program headers for even a limited
325 verification that they match. One could do a verification
326 of the DYNAMIC segment. Still, the found address is the best
327 one GDB could find. */
328
329 warning (_(".dynamic section for \"%s\" "
330 "is not at the expected address "
331 "(wrong library or version mismatch?)"), so->so_name);
332 }
333 }
334
335 set_addr:
336 so->lm_info->l_addr = l_addr;
337 so->lm_info->l_addr_p = 1;
338 }
339
340 return so->lm_info->l_addr;
341 }
342
343 /* Per pspace SVR4 specific data. */
344
345 struct svr4_info
346 {
347 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
348
349 /* Validity flag for debug_loader_offset. */
350 int debug_loader_offset_p;
351
352 /* Load address for the dynamic linker, inferred. */
353 CORE_ADDR debug_loader_offset;
354
355 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
356 char *debug_loader_name;
357
358 /* Load map address for the main executable. */
359 CORE_ADDR main_lm_addr;
360
361 CORE_ADDR interp_text_sect_low;
362 CORE_ADDR interp_text_sect_high;
363 CORE_ADDR interp_plt_sect_low;
364 CORE_ADDR interp_plt_sect_high;
365
366 /* Nonzero if the list of objects was last obtained from the target
367 via qXfer:libraries-svr4:read. */
368 int using_xfer;
369
370 /* Table of struct probe_and_action instances, used by the
371 probes-based interface to map breakpoint addresses to probes
372 and their associated actions. Lookup is performed using
373 probe_and_action->probe->address. */
374 htab_t probes_table;
375
376 /* List of objects loaded into the inferior, used by the probes-
377 based interface. */
378 struct so_list *solib_list;
379 };
380
381 /* Per-program-space data key. */
382 static const struct program_space_data *solib_svr4_pspace_data;
383
384 /* Free the probes table. */
385
386 static void
387 free_probes_table (struct svr4_info *info)
388 {
389 if (info->probes_table == NULL)
390 return;
391
392 htab_delete (info->probes_table);
393 info->probes_table = NULL;
394 }
395
396 /* Free the solib list. */
397
398 static void
399 free_solib_list (struct svr4_info *info)
400 {
401 svr4_free_library_list (&info->solib_list);
402 info->solib_list = NULL;
403 }
404
405 static void
406 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
407 {
408 struct svr4_info *info = arg;
409
410 free_probes_table (info);
411 free_solib_list (info);
412
413 xfree (info);
414 }
415
416 /* Get the current svr4 data. If none is found yet, add it now. This
417 function always returns a valid object. */
418
419 static struct svr4_info *
420 get_svr4_info (void)
421 {
422 struct svr4_info *info;
423
424 info = program_space_data (current_program_space, solib_svr4_pspace_data);
425 if (info != NULL)
426 return info;
427
428 info = XCNEW (struct svr4_info);
429 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
430 return info;
431 }
432
433 /* Local function prototypes */
434
435 static int match_main (const char *);
436
437 /* Read program header TYPE from inferior memory. The header is found
438 by scanning the OS auxiliary vector.
439
440 If TYPE == -1, return the program headers instead of the contents of
441 one program header.
442
443 Return a pointer to allocated memory holding the program header contents,
444 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
445 size of those contents is returned in P_SECT_SIZE. Likewise, the target
446 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE. */
447
448 static gdb_byte *
449 read_program_header (int type, int *p_sect_size, int *p_arch_size)
450 {
451 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
452 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
453 int arch_size, sect_size;
454 CORE_ADDR sect_addr;
455 gdb_byte *buf;
456 int pt_phdr_p = 0;
457
458 /* Get required auxv elements from target. */
459 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
460 return 0;
461 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
462 return 0;
463 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
464 return 0;
465 if (!at_phdr || !at_phnum)
466 return 0;
467
468 /* Determine ELF architecture type. */
469 if (at_phent == sizeof (Elf32_External_Phdr))
470 arch_size = 32;
471 else if (at_phent == sizeof (Elf64_External_Phdr))
472 arch_size = 64;
473 else
474 return 0;
475
476 /* Find the requested segment. */
477 if (type == -1)
478 {
479 sect_addr = at_phdr;
480 sect_size = at_phent * at_phnum;
481 }
482 else if (arch_size == 32)
483 {
484 Elf32_External_Phdr phdr;
485 int i;
486
487 /* Search for requested PHDR. */
488 for (i = 0; i < at_phnum; i++)
489 {
490 int p_type;
491
492 if (target_read_memory (at_phdr + i * sizeof (phdr),
493 (gdb_byte *)&phdr, sizeof (phdr)))
494 return 0;
495
496 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
497 4, byte_order);
498
499 if (p_type == PT_PHDR)
500 {
501 pt_phdr_p = 1;
502 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
503 4, byte_order);
504 }
505
506 if (p_type == type)
507 break;
508 }
509
510 if (i == at_phnum)
511 return 0;
512
513 /* Retrieve address and size. */
514 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
515 4, byte_order);
516 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
517 4, byte_order);
518 }
519 else
520 {
521 Elf64_External_Phdr phdr;
522 int i;
523
524 /* Search for requested PHDR. */
525 for (i = 0; i < at_phnum; i++)
526 {
527 int p_type;
528
529 if (target_read_memory (at_phdr + i * sizeof (phdr),
530 (gdb_byte *)&phdr, sizeof (phdr)))
531 return 0;
532
533 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
534 4, byte_order);
535
536 if (p_type == PT_PHDR)
537 {
538 pt_phdr_p = 1;
539 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
540 8, byte_order);
541 }
542
543 if (p_type == type)
544 break;
545 }
546
547 if (i == at_phnum)
548 return 0;
549
550 /* Retrieve address and size. */
551 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
552 8, byte_order);
553 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
554 8, byte_order);
555 }
556
557 /* PT_PHDR is optional, but we really need it
558 for PIE to make this work in general. */
559
560 if (pt_phdr_p)
561 {
562 /* at_phdr is the real address in memory. pt_phdr is what the program
563 header says it is. The relocation offset is the difference between the two. */
564 sect_addr = sect_addr + (at_phdr - pt_phdr);
565 }
566
567 /* Read in requested program header. */
568 buf = xmalloc (sect_size);
569 if (target_read_memory (sect_addr, buf, sect_size))
570 {
571 xfree (buf);
572 return NULL;
573 }
574
575 if (p_arch_size)
576 *p_arch_size = arch_size;
577 if (p_sect_size)
578 *p_sect_size = sect_size;
579
580 return buf;
581 }
582
583
584 /* Return program interpreter string. */
585 static char *
586 find_program_interpreter (void)
587 {
588 gdb_byte *buf = NULL;
589
590 /* If we have an exec_bfd, use its section table. */
591 if (exec_bfd
592 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
593 {
594 struct bfd_section *interp_sect;
595
596 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
597 if (interp_sect != NULL)
598 {
599 int sect_size = bfd_section_size (exec_bfd, interp_sect);
600
601 buf = xmalloc (sect_size);
602 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
603 }
604 }
605
606 /* If we didn't find it, use the target auxiliary vector. */
607 if (!buf)
608 buf = read_program_header (PT_INTERP, NULL, NULL);
609
610 return (char *) buf;
611 }
612
613
614 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
615 found, 1 is returned and the corresponding PTR is set. */
616
617 static int
618 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr)
619 {
620 int arch_size, step, sect_size;
621 long current_dyntag;
622 CORE_ADDR dyn_ptr, dyn_addr;
623 gdb_byte *bufend, *bufstart, *buf;
624 Elf32_External_Dyn *x_dynp_32;
625 Elf64_External_Dyn *x_dynp_64;
626 struct bfd_section *sect;
627 struct target_section *target_section;
628
629 if (abfd == NULL)
630 return 0;
631
632 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
633 return 0;
634
635 arch_size = bfd_get_arch_size (abfd);
636 if (arch_size == -1)
637 return 0;
638
639 /* Find the start address of the .dynamic section. */
640 sect = bfd_get_section_by_name (abfd, ".dynamic");
641 if (sect == NULL)
642 return 0;
643
644 for (target_section = current_target_sections->sections;
645 target_section < current_target_sections->sections_end;
646 target_section++)
647 if (sect == target_section->the_bfd_section)
648 break;
649 if (target_section < current_target_sections->sections_end)
650 dyn_addr = target_section->addr;
651 else
652 {
653 /* ABFD may come from OBJFILE acting only as a symbol file without being
654 loaded into the target (see add_symbol_file_command). In such a case
655 fall back to the file VMA address, without the possibility of
656 having the section relocated to its actual in-memory address. */
657
658 dyn_addr = bfd_section_vma (abfd, sect);
659 }
660
661 /* Read in .dynamic from the BFD. We will get the actual value
662 from memory later. */
663 sect_size = bfd_section_size (abfd, sect);
664 buf = bufstart = alloca (sect_size);
665 if (!bfd_get_section_contents (abfd, sect,
666 buf, 0, sect_size))
667 return 0;
668
669 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
670 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
671 : sizeof (Elf64_External_Dyn);
672 for (bufend = buf + sect_size;
673 buf < bufend;
674 buf += step)
675 {
676 if (arch_size == 32)
677 {
678 x_dynp_32 = (Elf32_External_Dyn *) buf;
679 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
680 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
681 }
682 else
683 {
684 x_dynp_64 = (Elf64_External_Dyn *) buf;
685 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
686 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
687 }
688 if (current_dyntag == DT_NULL)
689 return 0;
690 if (current_dyntag == desired_dyntag)
691 {
692 /* If requested, try to read the runtime value of this .dynamic
693 entry. */
694 if (ptr)
695 {
696 struct type *ptr_type;
697 gdb_byte ptr_buf[8];
698 CORE_ADDR ptr_addr;
699
700 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
701 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
702 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
703 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
704 *ptr = dyn_ptr;
705 }
706 return 1;
707 }
708 }
709
710 return 0;
711 }
712
713 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
714 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
715 is returned and the corresponding PTR is set. */
716
717 static int
718 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr)
719 {
720 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
721 int sect_size, arch_size, step;
722 long current_dyntag;
723 CORE_ADDR dyn_ptr;
724 gdb_byte *bufend, *bufstart, *buf;
725
726 /* Read in .dynamic section. */
727 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
728 if (!buf)
729 return 0;
730
731 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
732 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
733 : sizeof (Elf64_External_Dyn);
734 for (bufend = buf + sect_size;
735 buf < bufend;
736 buf += step)
737 {
738 if (arch_size == 32)
739 {
740 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
741
742 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
743 4, byte_order);
744 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
745 4, byte_order);
746 }
747 else
748 {
749 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
750
751 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
752 8, byte_order);
753 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
754 8, byte_order);
755 }
756 if (current_dyntag == DT_NULL)
757 break;
758
759 if (current_dyntag == desired_dyntag)
760 {
761 if (ptr)
762 *ptr = dyn_ptr;
763
764 xfree (bufstart);
765 return 1;
766 }
767 }
768
769 xfree (bufstart);
770 return 0;
771 }
772
773 /* Locate the base address of dynamic linker structs for SVR4 elf
774 targets.
775
776 For SVR4 elf targets the address of the dynamic linker's runtime
777 structure is contained within the dynamic info section in the
778 executable file. The dynamic section is also mapped into the
779 inferior address space. Because the runtime loader fills in the
780 real address before starting the inferior, we have to read in the
781 dynamic info section from the inferior address space.
782 If there are any errors while trying to find the address, we
783 silently return 0, otherwise the found address is returned. */
784
785 static CORE_ADDR
786 elf_locate_base (void)
787 {
788 struct bound_minimal_symbol msymbol;
789 CORE_ADDR dyn_ptr;
790
791 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
792 instead of DT_DEBUG, although they sometimes contain an unused
793 DT_DEBUG. */
794 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
795 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
796 {
797 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
798 gdb_byte *pbuf;
799 int pbuf_size = TYPE_LENGTH (ptr_type);
800
801 pbuf = alloca (pbuf_size);
802 /* DT_MIPS_RLD_MAP contains a pointer to the address
803 of the dynamic link structure. */
804 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
805 return 0;
806 return extract_typed_address (pbuf, ptr_type);
807 }
808
809 /* Find DT_DEBUG. */
810 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
811 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
812 return dyn_ptr;
813
814 /* This may be a static executable. Look for the symbol
815 conventionally named _r_debug, as a last resort. */
816 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
817 if (msymbol.minsym != NULL)
818 return BMSYMBOL_VALUE_ADDRESS (msymbol);
819
820 /* DT_DEBUG entry not found. */
821 return 0;
822 }
823
824 /* Locate the base address of dynamic linker structs.
825
826 For both the SunOS and SVR4 shared library implementations, if the
827 inferior executable has been linked dynamically, there is a single
828 address somewhere in the inferior's data space which is the key to
829 locating all of the dynamic linker's runtime structures. This
830 address is the value of the debug base symbol. The job of this
831 function is to find and return that address, or to return 0 if there
832 is no such address (the executable is statically linked for example).
833
834 For SunOS, the job is almost trivial, since the dynamic linker and
835 all of its structures are statically linked to the executable at
836 link time. Thus the symbol for the address we are looking for has
837 already been added to the minimal symbol table for the executable's
838 objfile at the time the symbol file's symbols were read, and all we
839 have to do is look it up there. Note that we explicitly do NOT want
840 to find the copies in the shared library.
841
842 The SVR4 version is a bit more complicated because the address
843 is contained somewhere in the dynamic info section. We have to go
844 to a lot more work to discover the address of the debug base symbol.
845 Because of this complexity, we cache the value we find and return that
846 value on subsequent invocations. Note there is no copy in the
847 executable symbol tables. */
848
849 static CORE_ADDR
850 locate_base (struct svr4_info *info)
851 {
852 /* Check to see if we have a currently valid address, and if so, avoid
853 doing all this work again and just return the cached address. If
854 we have no cached address, try to locate it in the dynamic info
855 section for ELF executables. There's no point in doing any of this
856 though if we don't have some link map offsets to work with. */
857
858 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
859 info->debug_base = elf_locate_base ();
860 return info->debug_base;
861 }
862
863 /* Find the first element in the inferior's dynamic link map, and
864 return its address in the inferior. Return zero if the address
865 could not be determined.
866
867 FIXME: Perhaps we should validate the info somehow, perhaps by
868 checking r_version for a known version number, or r_state for
869 RT_CONSISTENT. */
870
871 static CORE_ADDR
872 solib_svr4_r_map (struct svr4_info *info)
873 {
874 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
875 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
876 CORE_ADDR addr = 0;
877
878 TRY
879 {
880 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
881 ptr_type);
882 }
883 CATCH (ex, RETURN_MASK_ERROR)
884 {
885 exception_print (gdb_stderr, ex);
886 }
887 END_CATCH
888
889 return addr;
890 }
891
892 /* Find r_brk from the inferior's debug base. */
893
894 static CORE_ADDR
895 solib_svr4_r_brk (struct svr4_info *info)
896 {
897 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
898 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
899
900 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
901 ptr_type);
902 }
903
904 /* Find the link map for the dynamic linker (if it is not in the
905 normal list of loaded shared objects). */
906
907 static CORE_ADDR
908 solib_svr4_r_ldsomap (struct svr4_info *info)
909 {
910 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
911 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
912 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
913 ULONGEST version;
914
915 /* Check version, and return zero if `struct r_debug' doesn't have
916 the r_ldsomap member. */
917 version
918 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
919 lmo->r_version_size, byte_order);
920 if (version < 2 || lmo->r_ldsomap_offset == -1)
921 return 0;
922
923 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
924 ptr_type);
925 }
926
927 /* On Solaris systems with some versions of the dynamic linker,
928 ld.so's l_name pointer points to the SONAME in the string table
929 rather than into writable memory. So that GDB can find shared
930 libraries when loading a core file generated by gcore, ensure that
931 memory areas containing the l_name string are saved in the core
932 file. */
933
934 static int
935 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
936 {
937 struct svr4_info *info;
938 CORE_ADDR ldsomap;
939 struct so_list *newobj;
940 struct cleanup *old_chain;
941 CORE_ADDR name_lm;
942
943 info = get_svr4_info ();
944
945 info->debug_base = 0;
946 locate_base (info);
947 if (!info->debug_base)
948 return 0;
949
950 ldsomap = solib_svr4_r_ldsomap (info);
951 if (!ldsomap)
952 return 0;
953
954 newobj = XCNEW (struct so_list);
955 old_chain = make_cleanup (xfree, newobj);
956 newobj->lm_info = lm_info_read (ldsomap);
957 make_cleanup (xfree, newobj->lm_info);
958 name_lm = newobj->lm_info ? newobj->lm_info->l_name : 0;
959 do_cleanups (old_chain);
960
961 return (name_lm >= vaddr && name_lm < vaddr + size);
962 }
963
964 /* Implement the "open_symbol_file_object" target_so_ops method.
965
966 If no open symbol file, attempt to locate and open the main symbol
967 file. On SVR4 systems, this is the first link map entry. If its
968 name is here, we can open it. Useful when attaching to a process
969 without first loading its symbol file. */
970
971 static int
972 open_symbol_file_object (void *from_ttyp)
973 {
974 CORE_ADDR lm, l_name;
975 char *filename;
976 int errcode;
977 int from_tty = *(int *)from_ttyp;
978 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
979 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
980 int l_name_size = TYPE_LENGTH (ptr_type);
981 gdb_byte *l_name_buf = xmalloc (l_name_size);
982 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
983 struct svr4_info *info = get_svr4_info ();
984
985 if (symfile_objfile)
986 if (!query (_("Attempt to reload symbols from process? ")))
987 {
988 do_cleanups (cleanups);
989 return 0;
990 }
991
992 /* Always locate the debug struct, in case it has moved. */
993 info->debug_base = 0;
994 if (locate_base (info) == 0)
995 {
996 do_cleanups (cleanups);
997 return 0; /* failed somehow... */
998 }
999
1000 /* First link map member should be the executable. */
1001 lm = solib_svr4_r_map (info);
1002 if (lm == 0)
1003 {
1004 do_cleanups (cleanups);
1005 return 0; /* failed somehow... */
1006 }
1007
1008 /* Read address of name from target memory to GDB. */
1009 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1010
1011 /* Convert the address to host format. */
1012 l_name = extract_typed_address (l_name_buf, ptr_type);
1013
1014 if (l_name == 0)
1015 {
1016 do_cleanups (cleanups);
1017 return 0; /* No filename. */
1018 }
1019
1020 /* Now fetch the filename from target memory. */
1021 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1022 make_cleanup (xfree, filename);
1023
1024 if (errcode)
1025 {
1026 warning (_("failed to read exec filename from attached file: %s"),
1027 safe_strerror (errcode));
1028 do_cleanups (cleanups);
1029 return 0;
1030 }
1031
1032 /* Have a pathname: read the symbol file. */
1033 symbol_file_add_main (filename, from_tty);
1034
1035 do_cleanups (cleanups);
1036 return 1;
1037 }
1038
1039 /* Data exchange structure for the XML parser as returned by
1040 svr4_current_sos_via_xfer_libraries. */
1041
1042 struct svr4_library_list
1043 {
1044 struct so_list *head, **tailp;
1045
1046 /* Inferior address of struct link_map used for the main executable. It is
1047 NULL if not known. */
1048 CORE_ADDR main_lm;
1049 };
1050
1051 /* Implementation for target_so_ops.free_so. */
1052
1053 static void
1054 svr4_free_so (struct so_list *so)
1055 {
1056 xfree (so->lm_info);
1057 }
1058
1059 /* Implement target_so_ops.clear_so. */
1060
1061 static void
1062 svr4_clear_so (struct so_list *so)
1063 {
1064 if (so->lm_info != NULL)
1065 so->lm_info->l_addr_p = 0;
1066 }
1067
1068 /* Free so_list built so far (called via cleanup). */
1069
1070 static void
1071 svr4_free_library_list (void *p_list)
1072 {
1073 struct so_list *list = *(struct so_list **) p_list;
1074
1075 while (list != NULL)
1076 {
1077 struct so_list *next = list->next;
1078
1079 free_so (list);
1080 list = next;
1081 }
1082 }
1083
1084 /* Copy library list. */
1085
1086 static struct so_list *
1087 svr4_copy_library_list (struct so_list *src)
1088 {
1089 struct so_list *dst = NULL;
1090 struct so_list **link = &dst;
1091
1092 while (src != NULL)
1093 {
1094 struct so_list *newobj;
1095
1096 newobj = xmalloc (sizeof (struct so_list));
1097 memcpy (newobj, src, sizeof (struct so_list));
1098
1099 newobj->lm_info = xmalloc (sizeof (struct lm_info));
1100 memcpy (newobj->lm_info, src->lm_info, sizeof (struct lm_info));
1101
1102 newobj->next = NULL;
1103 *link = newobj;
1104 link = &newobj->next;
1105
1106 src = src->next;
1107 }
1108
1109 return dst;
1110 }
1111
1112 #ifdef HAVE_LIBEXPAT
1113
1114 #include "xml-support.h"
1115
1116 /* Handle the start of a <library> element. Note: new elements are added
1117 at the tail of the list, keeping the list in order. */
1118
1119 static void
1120 library_list_start_library (struct gdb_xml_parser *parser,
1121 const struct gdb_xml_element *element,
1122 void *user_data, VEC(gdb_xml_value_s) *attributes)
1123 {
1124 struct svr4_library_list *list = user_data;
1125 const char *name = xml_find_attribute (attributes, "name")->value;
1126 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1127 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1128 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1129 struct so_list *new_elem;
1130
1131 new_elem = XCNEW (struct so_list);
1132 new_elem->lm_info = XCNEW (struct lm_info);
1133 new_elem->lm_info->lm_addr = *lmp;
1134 new_elem->lm_info->l_addr_inferior = *l_addrp;
1135 new_elem->lm_info->l_ld = *l_ldp;
1136
1137 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1138 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1139 strcpy (new_elem->so_original_name, new_elem->so_name);
1140
1141 *list->tailp = new_elem;
1142 list->tailp = &new_elem->next;
1143 }
1144
1145 /* Handle the start of a <library-list-svr4> element. */
1146
1147 static void
1148 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1149 const struct gdb_xml_element *element,
1150 void *user_data, VEC(gdb_xml_value_s) *attributes)
1151 {
1152 struct svr4_library_list *list = user_data;
1153 const char *version = xml_find_attribute (attributes, "version")->value;
1154 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1155
1156 if (strcmp (version, "1.0") != 0)
1157 gdb_xml_error (parser,
1158 _("SVR4 Library list has unsupported version \"%s\""),
1159 version);
1160
1161 if (main_lm)
1162 list->main_lm = *(ULONGEST *) main_lm->value;
1163 }
1164
1165 /* The allowed elements and attributes for an XML library list.
1166 The root element is a <library-list>. */
1167
1168 static const struct gdb_xml_attribute svr4_library_attributes[] =
1169 {
1170 { "name", GDB_XML_AF_NONE, NULL, NULL },
1171 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1172 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1173 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1174 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1175 };
1176
1177 static const struct gdb_xml_element svr4_library_list_children[] =
1178 {
1179 {
1180 "library", svr4_library_attributes, NULL,
1181 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1182 library_list_start_library, NULL
1183 },
1184 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1185 };
1186
1187 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1188 {
1189 { "version", GDB_XML_AF_NONE, NULL, NULL },
1190 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1191 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1192 };
1193
1194 static const struct gdb_xml_element svr4_library_list_elements[] =
1195 {
1196 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1197 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1198 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1199 };
1200
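/* For illustration, a document in the format parsed below might read as
   follows (all addresses made up):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7fc4620"
                l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dcdba0"/>
     </library-list-svr4>  */
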
1201 /* Parse a qXfer:libraries-svr4:read document into *LIST.
1202
1203 Return 0 if the packet is not supported; *LIST is not modified in that
1204 case. Return 1 if *LIST contains the library list; it may be
1205 empty, and the caller is responsible for freeing all its entries. */
1206
1207 static int
1208 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1209 {
1210 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1211 &list->head);
1212
1213 memset (list, 0, sizeof (*list));
1214 list->tailp = &list->head;
1215 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1216 svr4_library_list_elements, document, list) == 0)
1217 {
1218 /* Parsed successfully, keep the result. */
1219 discard_cleanups (back_to);
1220 return 1;
1221 }
1222
1223 do_cleanups (back_to);
1224 return 0;
1225 }
1226
1227 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1228
1229 Return 0 if the packet is not supported; *LIST is not modified in that
1230 case. Return 1 if *LIST contains the library list; it may be
1231 empty, and the caller is responsible for freeing all its entries.
1232
1233 Note that ANNEX must be NULL if the remote does not explicitly allow
1234 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1235 this can be checked using target_augmented_libraries_svr4_read (). */
1236
1237 static int
1238 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1239 const char *annex)
1240 {
1241 char *svr4_library_document;
1242 int result;
1243 struct cleanup *back_to;
1244
1245 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1246
1247 /* Fetch the list of shared libraries. */
1248 svr4_library_document = target_read_stralloc (&current_target,
1249 TARGET_OBJECT_LIBRARIES_SVR4,
1250 annex);
1251 if (svr4_library_document == NULL)
1252 return 0;
1253
1254 back_to = make_cleanup (xfree, svr4_library_document);
1255 result = svr4_parse_libraries (svr4_library_document, list);
1256 do_cleanups (back_to);
1257
1258 return result;
1259 }
1260
1261 #else
1262
1263 static int
1264 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1265 const char *annex)
1266 {
1267 return 0;
1268 }
1269
1270 #endif
1271
1272 /* If no shared library information is available from the dynamic
1273 linker, build a fallback list from other sources. */
1274
1275 static struct so_list *
1276 svr4_default_sos (void)
1277 {
1278 struct svr4_info *info = get_svr4_info ();
1279 struct so_list *newobj;
1280
1281 if (!info->debug_loader_offset_p)
1282 return NULL;
1283
1284 newobj = XCNEW (struct so_list);
1285
1286 newobj->lm_info = xzalloc (sizeof (struct lm_info));
1287
1288 /* Nothing will ever check the other fields if we set l_addr_p. */
1289 newobj->lm_info->l_addr = info->debug_loader_offset;
1290 newobj->lm_info->l_addr_p = 1;
1291
1292 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1293 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1294 strcpy (newobj->so_original_name, newobj->so_name);
1295
1296 return newobj;
1297 }
1298
1299 /* Read the whole inferior libraries chain starting at address LM.
1300 Expect the L_PREV field of the chain's first entry to be PREV_LM.
1301 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1302 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1303 to it. Returns nonzero upon success. If zero is returned the
1304 entries stored to LINK_PTR_PTR are still valid although they may
1305 represent only part of the inferior library list. */
1306
1307 static int
1308 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1309 struct so_list ***link_ptr_ptr, int ignore_first)
1310 {
1311 CORE_ADDR first_l_name = 0;
1312 CORE_ADDR next_lm;
1313
1314 for (; lm != 0; prev_lm = lm, lm = next_lm)
1315 {
1316 struct so_list *newobj;
1317 struct cleanup *old_chain;
1318 int errcode;
1319 char *buffer;
1320
1321 newobj = XCNEW (struct so_list);
1322 old_chain = make_cleanup_free_so (newobj);
1323
1324 newobj->lm_info = lm_info_read (lm);
1325 if (newobj->lm_info == NULL)
1326 {
1327 do_cleanups (old_chain);
1328 return 0;
1329 }
1330
1331 next_lm = newobj->lm_info->l_next;
1332
1333 if (newobj->lm_info->l_prev != prev_lm)
1334 {
1335 warning (_("Corrupted shared library list: %s != %s"),
1336 paddress (target_gdbarch (), prev_lm),
1337 paddress (target_gdbarch (), newobj->lm_info->l_prev));
1338 do_cleanups (old_chain);
1339 return 0;
1340 }
1341
1342 /* For SVR4 versions, the first entry in the link map is for the
1343 inferior executable, so we must ignore it. For some versions of
1344 SVR4, it has no name. For others (Solaris 2.3 for example), it
1345 does have a name, so we can no longer use a missing name to
1346 decide when to ignore it. */
1347 if (ignore_first && newobj->lm_info->l_prev == 0)
1348 {
1349 struct svr4_info *info = get_svr4_info ();
1350
1351 first_l_name = newobj->lm_info->l_name;
1352 info->main_lm_addr = newobj->lm_info->lm_addr;
1353 do_cleanups (old_chain);
1354 continue;
1355 }
1356
1357 /* Extract this shared object's name. */
1358 target_read_string (newobj->lm_info->l_name, &buffer,
1359 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1360 if (errcode != 0)
1361 {
1362 /* If this entry's l_name address matches that of the
1363 inferior executable, then this is not a normal shared
1364 object, but (most likely) a vDSO. In this case, silently
1365 skip it; otherwise emit a warning. */
1366 if (first_l_name == 0 || newobj->lm_info->l_name != first_l_name)
1367 warning (_("Can't read pathname for load map: %s."),
1368 safe_strerror (errcode));
1369 do_cleanups (old_chain);
1370 continue;
1371 }
1372
1373 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1374 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1375 strcpy (newobj->so_original_name, newobj->so_name);
1376 xfree (buffer);
1377
1378 /* If this entry has no name, or its name matches the name
1379 for the main executable, don't include it in the list. */
1380 if (! newobj->so_name[0] || match_main (newobj->so_name))
1381 {
1382 do_cleanups (old_chain);
1383 continue;
1384 }
1385
1386 discard_cleanups (old_chain);
1387 newobj->next = 0;
1388 **link_ptr_ptr = newobj;
1389 *link_ptr_ptr = &newobj->next;
1390 }
1391
1392 return 1;
1393 }
1394
1395 /* Read the full list of currently loaded shared objects directly
1396 from the inferior, without referring to any libraries read and
1397 stored by the probes interface. Handle special cases relating
1398 to the first elements of the list. */
1399
1400 static struct so_list *
1401 svr4_current_sos_direct (struct svr4_info *info)
1402 {
1403 CORE_ADDR lm;
1404 struct so_list *head = NULL;
1405 struct so_list **link_ptr = &head;
1406 struct cleanup *back_to;
1407 int ignore_first;
1408 struct svr4_library_list library_list;
1409
1410 /* Fall back to manual examination of the target if the packet is not
1411 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1412 tests a case where gdbserver cannot find the shared libraries list while
1413 GDB itself is able to find it via SYMFILE_OBJFILE.
1414
1415 Unfortunately statically linked inferiors will also fall back through this
1416 suboptimal code path. */
1417
1418 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1419 NULL);
1420 if (info->using_xfer)
1421 {
1422 if (library_list.main_lm)
1423 info->main_lm_addr = library_list.main_lm;
1424
1425 return library_list.head ? library_list.head : svr4_default_sos ();
1426 }
1427
1428 /* Always locate the debug struct, in case it has moved. */
1429 info->debug_base = 0;
1430 locate_base (info);
1431
1432 /* If we can't find the dynamic linker's base structure, this
1433 must not be a dynamically linked executable. Hmm. */
1434 if (! info->debug_base)
1435 return svr4_default_sos ();
1436
1437 /* Assume that everything is a library if the dynamic loader was loaded
1438 late by a static executable. */
1439 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1440 ignore_first = 0;
1441 else
1442 ignore_first = 1;
1443
1444 back_to = make_cleanup (svr4_free_library_list, &head);
1445
1446 /* Walk the inferior's link map list, and build our list of
1447 `struct so_list' nodes. */
1448 lm = solib_svr4_r_map (info);
1449 if (lm)
1450 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1451
1452 /* On Solaris, the dynamic linker is not in the normal list of
1453 shared objects, so make sure we pick it up too. Having
1454 symbol information for the dynamic linker is quite crucial
1455 for skipping dynamic linker resolver code. */
1456 lm = solib_svr4_r_ldsomap (info);
1457 if (lm)
1458 svr4_read_so_list (lm, 0, &link_ptr, 0);
1459
1460 discard_cleanups (back_to);
1461
1462 if (head == NULL)
1463 return svr4_default_sos ();
1464
1465 return head;
1466 }
1467
1468 /* Implement the main part of the "current_sos" target_so_ops
1469 method. */
1470
1471 static struct so_list *
1472 svr4_current_sos_1 (void)
1473 {
1474 struct svr4_info *info = get_svr4_info ();
1475
1476 /* If the solib list has been read and stored by the probes
1477 interface then we return a copy of the stored list. */
1478 if (info->solib_list != NULL)
1479 return svr4_copy_library_list (info->solib_list);
1480
1481 /* Otherwise obtain the solib list directly from the inferior. */
1482 return svr4_current_sos_direct (info);
1483 }
1484
1485 /* Implement the "current_sos" target_so_ops method. */
1486
1487 static struct so_list *
1488 svr4_current_sos (void)
1489 {
1490 struct so_list *so_head = svr4_current_sos_1 ();
1491 struct mem_range vsyscall_range;
1492
1493 /* Filter out the vDSO module, if present. Its symbol file would
1494 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1495 managed by symfile-mem.c:add_vsyscall_page. */
1496 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1497 && vsyscall_range.length != 0)
1498 {
1499 struct so_list **sop;
1500
1501 sop = &so_head;
1502 while (*sop != NULL)
1503 {
1504 struct so_list *so = *sop;
1505
1506 /* We can't simply match the vDSO by starting address alone,
1507 because lm_info->l_addr_inferior (and also l_addr) do not
1508 necessarily represent the real starting address of the
1509 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1510 field (the ".dynamic" section of the shared object)
1511 always points at the absolute/resolved address though.
1512 So check whether that address is inside the vDSO's
1513 mapping instead.
1514
1515 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1516 0-based ELF, and we see:
1517
1518 (gdb) info auxv
1519 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1520 (gdb) p/x *_r_debug.r_map.l_next
1521 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1522
1523 And on Linux 2.6.32 (x86_64) we see:
1524
1525 (gdb) info auxv
1526 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1527 (gdb) p/x *_r_debug.r_map.l_next
1528 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1529
1530 Dumping that vDSO shows:
1531
1532 (gdb) info proc mappings
1533 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1534 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1535 # readelf -Wa vdso.bin
1536 [...]
1537 Entry point address: 0xffffffffff700700
1538 [...]
1539 Section Headers:
1540 [Nr] Name Type Address Off Size
1541 [ 0] NULL 0000000000000000 000000 000000
1542 [ 1] .hash HASH ffffffffff700120 000120 000038
1543 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1544 [...]
1545 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1546 */
1547 if (address_in_mem_range (so->lm_info->l_ld, &vsyscall_range))
1548 {
1549 *sop = so->next;
1550 free_so (so);
1551 break;
1552 }
1553
1554 sop = &so->next;
1555 }
1556 }
1557
1558 return so_head;
1559 }
1560
1561 /* Get the address of the link_map for a given OBJFILE. */
1562
1563 CORE_ADDR
1564 svr4_fetch_objfile_link_map (struct objfile *objfile)
1565 {
1566 struct so_list *so;
1567 struct svr4_info *info = get_svr4_info ();
1568
1569 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1570 if (info->main_lm_addr == 0)
1571 solib_add (NULL, 0, &current_target, auto_solib_add);
1572
1573 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1574 if (objfile == symfile_objfile)
1575 return info->main_lm_addr;
1576
1577 /* The other link map addresses may be found by examining the list
1578 of shared libraries. */
1579 for (so = master_so_list (); so; so = so->next)
1580 if (so->objfile == objfile)
1581 return so->lm_info->lm_addr;
1582
1583 /* Not found! */
1584 return 0;
1585 }
1586
1587 /* On some systems, the only way to recognize the link map entry for
1588 the main executable file is by looking at its name. Return
1589 non-zero iff SONAME matches one of the known main executable names. */
1590
1591 static int
1592 match_main (const char *soname)
1593 {
1594 const char * const *mainp;
1595
1596 for (mainp = main_name_list; *mainp != NULL; mainp++)
1597 {
1598 if (strcmp (soname, *mainp) == 0)
1599 return (1);
1600 }
1601
1602 return (0);
1603 }
1604
1605 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1606 SVR4 run time loader. */
1607
1608 int
1609 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1610 {
1611 struct svr4_info *info = get_svr4_info ();
1612
1613 return ((pc >= info->interp_text_sect_low
1614 && pc < info->interp_text_sect_high)
1615 || (pc >= info->interp_plt_sect_low
1616 && pc < info->interp_plt_sect_high)
1617 || in_plt_section (pc)
1618 || in_gnu_ifunc_stub (pc));
1619 }
1620
1621 /* Given an executable's ABFD and target, compute the entry-point
1622 address. */
1623
1624 static CORE_ADDR
1625 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1626 {
1627 CORE_ADDR addr;
1628
1629 /* KevinB wrote ... for most targets, the address returned by
1630 bfd_get_start_address() is the entry point for the start
1631 function. But, for some targets, bfd_get_start_address() returns
1632 the address of a function descriptor from which the entry point
1633 address may be extracted. This address is extracted by
1634 gdbarch_convert_from_func_ptr_addr(). The method
1635 gdbarch_convert_from_func_ptr_addr() is merely the identity
1636 function for targets which don't use function descriptors. */
1637 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1638 bfd_get_start_address (abfd),
1639 targ);
1640 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1641 }
1642
1643 /* A probe and its associated action. */
1644
1645 struct probe_and_action
1646 {
1647 /* The probe. */
1648 struct probe *probe;
1649
1650 /* The relocated address of the probe. */
1651 CORE_ADDR address;
1652
1653 /* The action. */
1654 enum probe_action action;
1655 };
1656
1657 /* Returns a hash code for the probe_and_action referenced by p. */
1658
1659 static hashval_t
1660 hash_probe_and_action (const void *p)
1661 {
1662 const struct probe_and_action *pa = p;
1663
1664 return (hashval_t) pa->address;
1665 }
1666
1667 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1668 are equal. */
1669
1670 static int
1671 equal_probe_and_action (const void *p1, const void *p2)
1672 {
1673 const struct probe_and_action *pa1 = p1;
1674 const struct probe_and_action *pa2 = p2;
1675
1676 return pa1->address == pa2->address;
1677 }
1678
1679 /* Register a solib event probe and its associated action in the
1680 probes table. */
1681
1682 static void
1683 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1684 enum probe_action action)
1685 {
1686 struct svr4_info *info = get_svr4_info ();
1687 struct probe_and_action lookup, *pa;
1688 void **slot;
1689
1690 /* Create the probes table, if necessary. */
1691 if (info->probes_table == NULL)
1692 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1693 equal_probe_and_action,
1694 xfree, xcalloc, xfree);
1695
1696 lookup.probe = probe;
1697 lookup.address = address;
1698 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1699 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1700
1701 pa = XCNEW (struct probe_and_action);
1702 pa->probe = probe;
1703 pa->address = address;
1704 pa->action = action;
1705
1706 *slot = pa;
1707 }
1708
1709 /* Get the solib event probe at the specified location, and the
1710 action associated with it. Returns NULL if no solib event probe
1711 was found. */
1712
1713 static struct probe_and_action *
1714 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1715 {
1716 struct probe_and_action lookup;
1717 void **slot;
1718
1719 lookup.address = address;
1720 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1721
1722 if (slot == NULL)
1723 return NULL;
1724
1725 return (struct probe_and_action *) *slot;
1726 }
1727
1728 /* Decide what action to take when the specified solib event probe is
1729 hit. */
1730
1731 static enum probe_action
1732 solib_event_probe_action (struct probe_and_action *pa)
1733 {
1734 enum probe_action action;
1735 unsigned probe_argc;
1736 struct frame_info *frame = get_current_frame ();
1737
1738 action = pa->action;
1739 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1740 return action;
1741
1742 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1743
1744 /* Check that an appropriate number of arguments has been supplied.
1745 We expect:
1746 arg0: Lmid_t lmid (mandatory)
1747 arg1: struct r_debug *debug_base (mandatory)
1748 arg2: struct link_map *new (optional, for incremental updates) */
1749 probe_argc = get_probe_argument_count (pa->probe, frame);
1750 if (probe_argc == 2)
1751 action = FULL_RELOAD;
1752 else if (probe_argc < 2)
1753 action = PROBES_INTERFACE_FAILED;
1754
1755 return action;
1756 }
1757
1758 /* Populate the shared object list by reading the entire list of
1759 shared objects from the inferior. Handle special cases relating
1760 to the first elements of the list. Returns nonzero on success. */
1761
1762 static int
1763 solist_update_full (struct svr4_info *info)
1764 {
1765 free_solib_list (info);
1766 info->solib_list = svr4_current_sos_direct (info);
1767
1768 return 1;
1769 }
1770
1771 /* Update the shared object list starting from the link-map entry
1772 passed by the linker in the probe's third argument. Returns
1773 nonzero if the list was successfully updated, or zero to indicate
1774 failure. */
1775
1776 static int
1777 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1778 {
1779 struct so_list *tail;
1780 CORE_ADDR prev_lm;
1781
1782 /* svr4_current_sos_direct contains logic to handle a number of
1783 special cases relating to the first elements of the list. To
1784 avoid duplicating this logic we defer to solist_update_full
1785 if the list is empty. */
1786 if (info->solib_list == NULL)
1787 return 0;
1788
1789 /* Fall back to a full update if we are using a remote target
1790 that does not support incremental transfers. */
1791 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1792 return 0;
1793
1794 /* Walk to the end of the list. */
1795 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1796 /* Nothing. */;
1797 prev_lm = tail->lm_info->lm_addr;
1798
1799 /* Read the new objects. */
1800 if (info->using_xfer)
1801 {
1802 struct svr4_library_list library_list;
1803 char annex[64];
1804
1805 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1806 phex_nz (lm, sizeof (lm)),
1807 phex_nz (prev_lm, sizeof (prev_lm)));
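      /* The annex is plain hex without a "0x" prefix, for example
	 (made-up addresses): "start=7ffff7fc4620;prev=7ffff7ffe190".  */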
1808 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1809 return 0;
1810
1811 tail->next = library_list.head;
1812 }
1813 else
1814 {
1815 struct so_list **link = &tail->next;
1816
1817 /* IGNORE_FIRST may safely be set to zero here because the
1818 above check and deferral to solist_update_full ensures
1819 that this call to svr4_read_so_list will never see the
1820 first element. */
1821 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1822 return 0;
1823 }
1824
1825 return 1;
1826 }
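
/* For illustration, with hypothetical link-map addresses LM =
   0x7ffff7fd9000 and PREV_LM = 0x7ffff7fd2000, the xsnprintf call
   above builds the annex

     start=7ffff7fd9000;prev=7ffff7fd2000

   which svr4_current_sos_via_xfer_libraries uses for the
   libraries-svr4 read, so only the newly added link-map entries are
   transferred from the target.  */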
1827
1828 /* Disable the probes-based linker interface and revert to the
1829 original interface. We don't reset the breakpoints as the
1830 ones set up for the probes-based interface are adequate. */
1831
1832 static void
1833 disable_probes_interface_cleanup (void *arg)
1834 {
1835 struct svr4_info *info = get_svr4_info ();
1836
1837 warning (_("Probes-based dynamic linker interface failed.\n"
1838 "Reverting to original interface.\n"));
1839
1840 free_probes_table (info);
1841 free_solib_list (info);
1842 }
1843
1844 /* Update the solib list as appropriate when using the
1845 probes-based linker interface. Do nothing if using the
1846 standard interface. */
1847
1848 static void
1849 svr4_handle_solib_event (void)
1850 {
1851 struct svr4_info *info = get_svr4_info ();
1852 struct probe_and_action *pa;
1853 enum probe_action action;
1854 struct cleanup *old_chain, *usm_chain;
1855 struct value *val;
1856 CORE_ADDR pc, debug_base, lm = 0;
1857 int is_initial_ns;
1858 struct frame_info *frame = get_current_frame ();
1859
1860 /* Do nothing if not using the probes interface. */
1861 if (info->probes_table == NULL)
1862 return;
1863
1864 /* If anything goes wrong we revert to the original linker
1865 interface. */
1866 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1867
1868 pc = regcache_read_pc (get_current_regcache ());
1869 pa = solib_event_probe_at (info, pc);
1870 if (pa == NULL)
1871 {
1872 do_cleanups (old_chain);
1873 return;
1874 }
1875
1876 action = solib_event_probe_action (pa);
1877 if (action == PROBES_INTERFACE_FAILED)
1878 {
1879 do_cleanups (old_chain);
1880 return;
1881 }
1882
1883 if (action == DO_NOTHING)
1884 {
1885 discard_cleanups (old_chain);
1886 return;
1887 }
1888
1889 /* evaluate_probe_argument looks up symbols in the dynamic linker
1890 using find_pc_section. find_pc_section is accelerated by a cache
1891 called the section map. The section map is invalidated every
1892 time a shared library is loaded or unloaded, and if the inferior
1893 is generating a lot of shared library events then the section map
1894 will be updated every time svr4_handle_solib_event is called.
1895 We called find_pc_section in svr4_create_solib_event_breakpoints,
1896 so we can guarantee that the dynamic linker's sections are in the
1897 section map. We can therefore inhibit section map updates across
1898 these calls to evaluate_probe_argument and save a lot of time. */
1899 inhibit_section_map_updates (current_program_space);
1900 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1901 current_program_space);
1902
1903 val = evaluate_probe_argument (pa->probe, 1, frame);
1904 if (val == NULL)
1905 {
1906 do_cleanups (old_chain);
1907 return;
1908 }
1909
1910 debug_base = value_as_address (val);
1911 if (debug_base == 0)
1912 {
1913 do_cleanups (old_chain);
1914 return;
1915 }
1916
1917 /* Always locate the debug struct, in case it moved. */
1918 info->debug_base = 0;
1919 if (locate_base (info) == 0)
1920 {
1921 do_cleanups (old_chain);
1922 return;
1923 }
1924
1925 /* GDB does not currently support libraries loaded via dlmopen
1926 into namespaces other than the initial one. We must ignore
1927 any namespace other than the initial namespace here until
1928 support for this is added to GDB. */
1929 if (debug_base != info->debug_base)
1930 action = DO_NOTHING;
1931
1932 if (action == UPDATE_OR_RELOAD)
1933 {
1934 val = evaluate_probe_argument (pa->probe, 2, frame);
1935 if (val != NULL)
1936 lm = value_as_address (val);
1937
1938 if (lm == 0)
1939 action = FULL_RELOAD;
1940 }
1941
1942 /* Resume section map updates. */
1943 do_cleanups (usm_chain);
1944
1945 if (action == UPDATE_OR_RELOAD)
1946 {
1947 if (!solist_update_incremental (info, lm))
1948 action = FULL_RELOAD;
1949 }
1950
1951 if (action == FULL_RELOAD)
1952 {
1953 if (!solist_update_full (info))
1954 {
1955 do_cleanups (old_chain);
1956 return;
1957 }
1958 }
1959
1960 discard_cleanups (old_chain);
1961 }
1962
1963 /* Helper function for svr4_update_solib_event_breakpoints. */
1964
1965 static int
1966 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1967 {
1968 struct bp_location *loc;
1969
1970 if (b->type != bp_shlib_event)
1971 {
1972 /* Continue iterating. */
1973 return 0;
1974 }
1975
1976 for (loc = b->loc; loc != NULL; loc = loc->next)
1977 {
1978 struct svr4_info *info;
1979 struct probe_and_action *pa;
1980
1981 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1982 if (info == NULL || info->probes_table == NULL)
1983 continue;
1984
1985 pa = solib_event_probe_at (info, loc->address);
1986 if (pa == NULL)
1987 continue;
1988
1989 if (pa->action == DO_NOTHING)
1990 {
1991 if (b->enable_state == bp_disabled && stop_on_solib_events)
1992 enable_breakpoint (b);
1993 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1994 disable_breakpoint (b);
1995 }
1996
1997 break;
1998 }
1999
2000 /* Continue iterating. */
2001 return 0;
2002 }
2003
2004 /* Enable or disable optional solib event breakpoints as appropriate.
2005 Called whenever stop_on_solib_events is changed. */
2006
2007 static void
2008 svr4_update_solib_event_breakpoints (void)
2009 {
2010 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2011 }
2012
2013 /* Create and register solib event breakpoints. PROBES is an array
2014 of NUM_PROBES elements, each of which is vector of probes. A
2015 solib event breakpoint will be created and registered for each
2016 probe. */
2017
2018 static void
2019 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2020 VEC (probe_p) **probes,
2021 struct objfile *objfile)
2022 {
2023 int i;
2024
2025 for (i = 0; i < NUM_PROBES; i++)
2026 {
2027 enum probe_action action = probe_info[i].action;
2028 struct probe *probe;
2029 int ix;
2030
2031 for (ix = 0;
2032 VEC_iterate (probe_p, probes[i], ix, probe);
2033 ++ix)
2034 {
2035 CORE_ADDR address = get_probe_address (probe, objfile);
2036
2037 create_solib_event_breakpoint (gdbarch, address);
2038 register_solib_event_probe (probe, address, action);
2039 }
2040 }
2041
2042 svr4_update_solib_event_breakpoints ();
2043 }
2044
2045 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2046 before and after mapping and unmapping shared libraries. The sole
2047 purpose of this method is to allow debuggers to set a breakpoint so
2048 they can track these changes.
2049
2050 Some versions of the glibc dynamic linker contain named probes
2051 to allow more fine-grained stopping. Given the address of the
2052 original marker function, this function attempts to find these
2053 probes, and if found, sets breakpoints on those instead. If the
2054 probes aren't found, a single breakpoint is set on the original
2055 marker function. */
2056
2057 static void
2058 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2059 CORE_ADDR address)
2060 {
2061 struct obj_section *os;
2062
2063 os = find_pc_section (address);
2064 if (os != NULL)
2065 {
2066 int with_prefix;
2067
2068 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2069 {
2070 VEC (probe_p) *probes[NUM_PROBES];
2071 int all_probes_found = 1;
2072 int checked_can_use_probe_arguments = 0;
2073 int i;
2074
2075 memset (probes, 0, sizeof (probes));
2076 for (i = 0; i < NUM_PROBES; i++)
2077 {
2078 const char *name = probe_info[i].name;
2079 struct probe *p;
2080 char buf[32];
2081
2082 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2083 shipped with an early version of the probes code in
2084 which the probes' names were prefixed with "rtld_"
2085 and the "map_failed" probe did not exist. The
2086 locations of the probes are otherwise the same, so
2087 we check for probes with prefixed names if probes
2088 with unprefixed names are not present. */
2089 if (with_prefix)
2090 {
2091 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2092 name = buf;
2093 }
2094
2095 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2096
2097 /* The "map_failed" probe did not exist in early
2098 versions of the probes code in which the probes'
2099 names were prefixed with "rtld_". */
2100 if (strcmp (name, "rtld_map_failed") == 0)
2101 continue;
2102
2103 if (VEC_empty (probe_p, probes[i]))
2104 {
2105 all_probes_found = 0;
2106 break;
2107 }
2108
2109 /* Ensure probe arguments can be evaluated. */
2110 if (!checked_can_use_probe_arguments)
2111 {
2112 p = VEC_index (probe_p, probes[i], 0);
2113 if (!can_evaluate_probe_arguments (p))
2114 {
2115 all_probes_found = 0;
2116 break;
2117 }
2118 checked_can_use_probe_arguments = 1;
2119 }
2120 }
2121
2122 if (all_probes_found)
2123 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2124
2125 for (i = 0; i < NUM_PROBES; i++)
2126 VEC_free (probe_p, probes[i]);
2127
2128 if (all_probes_found)
2129 return;
2130 }
2131 }
2132
2133 create_solib_event_breakpoint (gdbarch, address);
2134 }
2135
2136 /* Helper function for gdb_bfd_lookup_symbol. */
2137
2138 static int
2139 cmp_name_and_sec_flags (asymbol *sym, void *data)
2140 {
2141 return (strcmp (sym->name, (const char *) data) == 0
2142 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2143 }
2144 /* Arrange for dynamic linker to hit breakpoint.
2145
2146 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2147 debugger interface, support for arranging for the inferior to hit
2148 a breakpoint after mapping in the shared libraries. This function
2149 enables that breakpoint.
2150
2151 For SunOS, there is a special flag location (in_debugger) which we
2152 set to 1. When the dynamic linker sees this flag set, it will set
2153 a breakpoint at a location known only to itself, after saving the
2154 original contents of that place and the breakpoint address itself,
2155 in its own internal structures. When we resume the inferior, it
2156 will eventually take a SIGTRAP when it runs into the breakpoint.
2157 We handle this (in a different place) by restoring the contents of
2158 the breakpointed location (which is only known after it stops),
2159 chasing around to locate the shared libraries that have been
2160 loaded, then resuming.
2161
2162 For SVR4, the debugger interface structure contains a member (r_brk)
2163 which is statically initialized at the time the shared library is
2164 built, to the offset of a function (_r_debug_state) which is guaran-
2165 teed to be called once before mapping in a library, and again when
2166 the mapping is complete. At the time we are examining this member,
2167 it contains only the unrelocated offset of the function, so we have
2168 to do our own relocation. Later, when the dynamic linker actually
2169 runs, it relocates r_brk to be the actual address of _r_debug_state().
2170
2171 The debugger interface structure also contains an enumeration which
2172 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2173 depending upon whether or not the library is being mapped or unmapped,
2174 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
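
/* As a rough sketch, and assuming a glibc-style <link.h> (the real
   declaration is not part of this file), the debugger interface
   structure described above looks approximately like this:

     struct r_debug
     {
       int r_version;
       struct link_map *r_map;     first entry of the link-map list
       ElfW(Addr) r_brk;           address of the marker function
       enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
       ElfW(Addr) r_ldbase;        base address of the dynamic linker
     };

   The r_version/r_map/r_brk offsets hard-coded in
   svr4_ilp32_fetch_link_map_offsets and
   svr4_lp64_fetch_link_map_offsets below agree with this layout.  */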
2175
2176 static int
2177 enable_break (struct svr4_info *info, int from_tty)
2178 {
2179 struct bound_minimal_symbol msymbol;
2180 const char * const *bkpt_namep;
2181 asection *interp_sect;
2182 char *interp_name;
2183 CORE_ADDR sym_addr;
2184
2185 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2186 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2187
2188 /* If we already have a shared library list in the target, and
2189 r_debug contains r_brk, set the breakpoint there - this should
2190 mean r_brk has already been relocated. Assume the dynamic linker
2191 is the object containing r_brk. */
2192
2193 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2194 sym_addr = 0;
2195 if (info->debug_base && solib_svr4_r_map (info) != 0)
2196 sym_addr = solib_svr4_r_brk (info);
2197
2198 if (sym_addr != 0)
2199 {
2200 struct obj_section *os;
2201
2202 sym_addr = gdbarch_addr_bits_remove
2203 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2204 sym_addr,
2205 &current_target));
2206
2207 /* On at least some versions of Solaris there's a dynamic relocation
2208 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2209 we get control before the dynamic linker has self-relocated.
2210 Check if SYM_ADDR is in a known section; if it is, assume we can
2211 trust its value. This is just a heuristic, though; it could go away
2212 or be replaced if it's getting in the way.
2213
2214 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2215 however it's spelled in your particular system) is ARM or Thumb.
2216 That knowledge is encoded in the address, if it's Thumb the low bit
2217 is 1. However, we've stripped that info above and it's not clear
2218 what all the consequences are of passing a non-addr_bits_remove'd
2219 address to svr4_create_solib_event_breakpoints. The call to
2220 find_pc_section verifies we know about the address and have some
2221 hope of computing the right kind of breakpoint to use (via
2222 symbol info). It does mean that GDB needs to be pointed at a
2223 non-stripped version of the dynamic linker in order to obtain
2224 information it already knows about. Sigh. */
2225
2226 os = find_pc_section (sym_addr);
2227 if (os != NULL)
2228 {
2229 /* Record the relocated start and end address of the dynamic linker
2230 text and plt section for svr4_in_dynsym_resolve_code. */
2231 bfd *tmp_bfd;
2232 CORE_ADDR load_addr;
2233
2234 tmp_bfd = os->objfile->obfd;
2235 load_addr = ANOFFSET (os->objfile->section_offsets,
2236 SECT_OFF_TEXT (os->objfile));
2237
2238 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2239 if (interp_sect)
2240 {
2241 info->interp_text_sect_low =
2242 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2243 info->interp_text_sect_high =
2244 info->interp_text_sect_low
2245 + bfd_section_size (tmp_bfd, interp_sect);
2246 }
2247 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2248 if (interp_sect)
2249 {
2250 info->interp_plt_sect_low =
2251 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2252 info->interp_plt_sect_high =
2253 info->interp_plt_sect_low
2254 + bfd_section_size (tmp_bfd, interp_sect);
2255 }
2256
2257 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2258 return 1;
2259 }
2260 }
2261
2262 /* Find the program interpreter; if not found, warn the user and drop
2263 into the old breakpoint-at-symbol code. */
2264 interp_name = find_program_interpreter ();
2265 if (interp_name)
2266 {
2267 CORE_ADDR load_addr = 0;
2268 int load_addr_found = 0;
2269 int loader_found_in_list = 0;
2270 struct so_list *so;
2271 bfd *tmp_bfd = NULL;
2272 struct target_ops *tmp_bfd_target;
2273
2274 sym_addr = 0;
2275
2276 /* Now we need to figure out where the dynamic linker was
2277 loaded so that we can load its symbols and place a breakpoint
2278 in the dynamic linker itself.
2279
2280 This address is stored on the stack. However, I've been unable
2281 to find any magic formula to find it for Solaris (appears to
2282 be trivial on GNU/Linux). Therefore, we have to try an alternate
2283 mechanism to find the dynamic linker's base address. */
2284
2285 TRY
2286 {
2287 tmp_bfd = solib_bfd_open (interp_name);
2288 }
2289 CATCH (ex, RETURN_MASK_ALL)
2290 {
2291 }
2292 END_CATCH
2293
2294 if (tmp_bfd == NULL)
2295 goto bkpt_at_symbol;
2296
2297 /* Now convert the TMP_BFD into a target. That way target, as
2298 well as BFD operations can be used. */
2299 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2300 /* target_bfd_reopen acquired its own reference, so we can
2301 release ours now. */
2302 gdb_bfd_unref (tmp_bfd);
2303
2304 /* On a running target, we can get the dynamic linker's base
2305 address from the shared library table. */
2306 so = master_so_list ();
2307 while (so)
2308 {
2309 if (svr4_same_1 (interp_name, so->so_original_name))
2310 {
2311 load_addr_found = 1;
2312 loader_found_in_list = 1;
2313 load_addr = lm_addr_check (so, tmp_bfd);
2314 break;
2315 }
2316 so = so->next;
2317 }
2318
2319 /* If we were not able to find the base address of the loader
2320 from our so_list, then try using the AT_BASE auxiliary entry. */
2321 if (!load_addr_found)
2322 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2323 {
2324 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2325
2326 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
2327 that `+ load_addr' wraps within the CORE_ADDR width instead of creating
2328 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
2329 GDB. */
2330
2331 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2332 {
2333 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2334 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2335 tmp_bfd_target);
2336
2337 gdb_assert (load_addr < space_size);
2338
2339 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
2340 64bit ld.so with a 32bit executable; it should not happen. */
2341
2342 if (tmp_entry_point < space_size
2343 && tmp_entry_point + load_addr >= space_size)
2344 load_addr -= space_size;
2345 }
2346
2347 load_addr_found = 1;
2348 }
2349
2350 /* Otherwise we find the dynamic linker's base address by examining
2351 the current pc (which should point at the entry point for the
2352 dynamic linker) and subtracting the offset of the entry point.
2353
2354 This is more fragile than the previous approaches, but is a good
2355 fallback method because it has actually been working well in
2356 most cases. */
2357 if (!load_addr_found)
2358 {
2359 struct regcache *regcache
2360 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2361
2362 load_addr = (regcache_read_pc (regcache)
2363 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2364 }
2365
2366 if (!loader_found_in_list)
2367 {
2368 info->debug_loader_name = xstrdup (interp_name);
2369 info->debug_loader_offset_p = 1;
2370 info->debug_loader_offset = load_addr;
2371 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2372 }
2373
2374 /* Record the relocated start and end address of the dynamic linker
2375 text and plt section for svr4_in_dynsym_resolve_code. */
2376 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2377 if (interp_sect)
2378 {
2379 info->interp_text_sect_low =
2380 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2381 info->interp_text_sect_high =
2382 info->interp_text_sect_low
2383 + bfd_section_size (tmp_bfd, interp_sect);
2384 }
2385 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2386 if (interp_sect)
2387 {
2388 info->interp_plt_sect_low =
2389 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2390 info->interp_plt_sect_high =
2391 info->interp_plt_sect_low
2392 + bfd_section_size (tmp_bfd, interp_sect);
2393 }
2394
2395 /* Now try to set a breakpoint in the dynamic linker. */
2396 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2397 {
2398 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2399 (void *) *bkpt_namep);
2400 if (sym_addr != 0)
2401 break;
2402 }
2403
2404 if (sym_addr != 0)
2405 /* Convert 'sym_addr' from a function pointer to an address.
2406 Because we pass tmp_bfd_target instead of the current
2407 target, this will always produce an unrelocated value. */
2408 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2409 sym_addr,
2410 tmp_bfd_target);
2411
2412 /* We're done with both the temporary bfd and target. Closing
2413 the target closes the underlying bfd, because it holds the
2414 only remaining reference. */
2415 target_close (tmp_bfd_target);
2416
2417 if (sym_addr != 0)
2418 {
2419 svr4_create_solib_event_breakpoints (target_gdbarch (),
2420 load_addr + sym_addr);
2421 xfree (interp_name);
2422 return 1;
2423 }
2424
2425 /* For whatever reason we couldn't set a breakpoint in the dynamic
2426 linker. Warn and drop into the old code. */
2427 bkpt_at_symbol:
2428 xfree (interp_name);
2429 warning (_("Unable to find dynamic linker breakpoint function.\n"
2430 "GDB will be unable to debug shared library initializers\n"
2431 "and track explicitly loaded dynamic code."));
2432 }
2433
2434 /* Scan through the lists of symbols, trying to look up the symbol and
2435 set a breakpoint there. Terminate the loop if and when we succeed. */
2436
2437 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2438 {
2439 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2440 if ((msymbol.minsym != NULL)
2441 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2442 {
2443 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2444 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2445 sym_addr,
2446 &current_target);
2447 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2448 return 1;
2449 }
2450 }
2451
2452 if (interp_name != NULL && !current_inferior ()->attach_flag)
2453 {
2454 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2455 {
2456 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2457 if ((msymbol.minsym != NULL)
2458 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2459 {
2460 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2461 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2462 sym_addr,
2463 &current_target);
2464 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2465 return 1;
2466 }
2467 }
2468 }
2469 return 0;
2470 }
2471
2472 /* Implement the "special_symbol_handling" target_so_ops method. */
2473
2474 static void
2475 svr4_special_symbol_handling (void)
2476 {
2477 /* Nothing to do. */
2478 }
2479
2480 /* Read the ELF program headers from ABFD. Return the contents and
2481 set *PHDRS_SIZE to the size of the program headers. */
2482
2483 static gdb_byte *
2484 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2485 {
2486 Elf_Internal_Ehdr *ehdr;
2487 gdb_byte *buf;
2488
2489 ehdr = elf_elfheader (abfd);
2490
2491 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2492 if (*phdrs_size == 0)
2493 return NULL;
2494
2495 buf = xmalloc (*phdrs_size);
2496 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2497 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2498 {
2499 xfree (buf);
2500 return NULL;
2501 }
2502
2503 return buf;
2504 }
2505
2506 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2507 exec_bfd. Otherwise return 0.
2508
2509 We relocate all of the sections by the same amount. This
2510 behavior is mandated by recent editions of the System V ABI.
2511 According to the System V Application Binary Interface,
2512 Edition 4.1, page 5-5:
2513
2514 ... Though the system chooses virtual addresses for
2515 individual processes, it maintains the segments' relative
2516 positions. Because position-independent code uses relative
2517 addressing between segments, the difference between
2518 virtual addresses in memory must match the difference
2519 between virtual addresses in the file. The difference
2520 between the virtual address of any segment in memory and
2521 the corresponding virtual address in the file is thus a
2522 single constant value for any one executable or shared
2523 object in a given process. This difference is the base
2524 address. One use of the base address is to relocate the
2525 memory image of the program during dynamic linking.
2526
2527 The same language also appears in Edition 4.0 of the System V
2528 ABI and is left unspecified in some of the earlier editions.
2529
2530 Decide if the objfile needs to be relocated. As indicated above, we will
2531 only be here when execution is stopped. But during attachment the PC can be at
2532 an arbitrary address, so regcache_read_pc can be misleading (contrary to
2533 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter section,
2534 regcache_read_pc would point to the interpreter and not the main executable.
2535
2536 So, to summarize, relocations are necessary when the start address obtained
2537 from the executable is different from the address in auxv AT_ENTRY entry.
2538
2539 [ The astute reader will note that we also test to make sure that
2540 the executable in question has the DYNAMIC flag set. It is my
2541 opinion that this test is unnecessary (undesirable even). It
2542 was added to avoid inadvertent relocation of an executable
2543 whose e_type member in the ELF header is not ET_DYN. There may
2544 be a time in the future when it is desirable to do relocations
2545 on other types of files as well in which case this condition
2546 should either be removed or modified to accommodate the new file
2547 type. - Kevin, Nov 2000. ] */
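
/* As a worked example (the addresses are purely illustrative): for a
   PIE whose ELF header records a start address of 0x670 while the
   auxv AT_ENTRY reports 0x555555554670, the candidate is

     displacement = 0x555555554670 - 0x670 = 0x555555554000

   which is page aligned and therefore passes the alignment check
   below; every section of the executable is then relocated by this
   single constant.  */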
2548
2549 static int
2550 svr4_exec_displacement (CORE_ADDR *displacementp)
2551 {
2552 /* ENTRY_POINT is a possible function descriptor - before
2553 a call to gdbarch_convert_from_func_ptr_addr. */
2554 CORE_ADDR entry_point, displacement;
2555
2556 if (exec_bfd == NULL)
2557 return 0;
2558
2559 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
2560 being executed themselves and PIE (Position Independent Executable)
2561 executables are ET_DYN. */
2562
2563 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2564 return 0;
2565
2566 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2567 return 0;
2568
2569 displacement = entry_point - bfd_get_start_address (exec_bfd);
2570
2571 /* Verify the DISPLACEMENT candidate complies with the required page
2572 alignment. It is cheaper than the program headers comparison below. */
2573
2574 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2575 {
2576 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2577
2578 /* p_align of PT_LOAD segments does not specify any alignment but
2579 only congruency of addresses:
2580 p_offset % p_align == p_vaddr % p_align
2581 Kernel is free to load the executable with lower alignment. */
2582
2583 if ((displacement & (elf->minpagesize - 1)) != 0)
2584 return 0;
2585 }
2586
2587 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2588 comparing their program headers. If the program headers in the auxiliary
2589 vector do not match the program headers in the executable, then we are
2590 looking at a different file than the one used by the kernel - for
2591 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2592
2593 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2594 {
2595 /* Be optimistic and clear OK only if GDB was able to verify the headers
2596 really do not match. */
2597 int phdrs_size, phdrs2_size, ok = 1;
2598 gdb_byte *buf, *buf2;
2599 int arch_size;
2600
2601 buf = read_program_header (-1, &phdrs_size, &arch_size);
2602 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2603 if (buf != NULL && buf2 != NULL)
2604 {
2605 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2606
2607 /* We are dealing with three different addresses. EXEC_BFD
2608 represents the current addresses in the on-disk file. Target memory content
2609 may be different from EXEC_BFD as the file may have been prelinked
2610 to a different address after the executable has been loaded.
2611 Moreover the address of placement in target memory can be
2612 different from what the program headers in target memory say -
2613 this is the goal of PIE.
2614
2615 Detected DISPLACEMENT covers both the offsets of PIE placement and
2616 possible new prelink performed after start of the program. Here
2617 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2618 content offset for the verification purpose. */
2619
2620 if (phdrs_size != phdrs2_size
2621 || bfd_get_arch_size (exec_bfd) != arch_size)
2622 ok = 0;
2623 else if (arch_size == 32
2624 && phdrs_size >= sizeof (Elf32_External_Phdr)
2625 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2626 {
2627 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2628 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2629 CORE_ADDR displacement = 0;
2630 int i;
2631
2632 /* DISPLACEMENT could be found more easily by the difference of
2633 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2634 already have enough information to compute that displacement
2635 with what we've read. */
2636
2637 for (i = 0; i < ehdr2->e_phnum; i++)
2638 if (phdr2[i].p_type == PT_LOAD)
2639 {
2640 Elf32_External_Phdr *phdrp;
2641 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2642 CORE_ADDR vaddr, paddr;
2643 CORE_ADDR displacement_vaddr = 0;
2644 CORE_ADDR displacement_paddr = 0;
2645
2646 phdrp = &((Elf32_External_Phdr *) buf)[i];
2647 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2648 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2649
2650 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2651 byte_order);
2652 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2653
2654 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2655 byte_order);
2656 displacement_paddr = paddr - phdr2[i].p_paddr;
2657
2658 if (displacement_vaddr == displacement_paddr)
2659 displacement = displacement_vaddr;
2660
2661 break;
2662 }
2663
2664 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2665
2666 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2667 {
2668 Elf32_External_Phdr *phdrp;
2669 Elf32_External_Phdr *phdr2p;
2670 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2671 CORE_ADDR vaddr, paddr;
2672 asection *plt2_asect;
2673
2674 phdrp = &((Elf32_External_Phdr *) buf)[i];
2675 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2676 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2677 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2678
2679 /* PT_GNU_STACK is an exception by being never relocated by
2680 prelink as its addresses are always zero. */
2681
2682 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2683 continue;
2684
2685 /* Check also other adjustment combinations - PR 11786. */
2686
2687 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2688 byte_order);
2689 vaddr -= displacement;
2690 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2691
2692 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2693 byte_order);
2694 paddr -= displacement;
2695 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2696
2697 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2698 continue;
2699
2700 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2701 CentOS-5 has problems with filesz, memsz as well.
2702 See PR 11786. */
2703 if (phdr2[i].p_type == PT_GNU_RELRO)
2704 {
2705 Elf32_External_Phdr tmp_phdr = *phdrp;
2706 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2707
2708 memset (tmp_phdr.p_filesz, 0, 4);
2709 memset (tmp_phdr.p_memsz, 0, 4);
2710 memset (tmp_phdr.p_flags, 0, 4);
2711 memset (tmp_phdr.p_align, 0, 4);
2712 memset (tmp_phdr2.p_filesz, 0, 4);
2713 memset (tmp_phdr2.p_memsz, 0, 4);
2714 memset (tmp_phdr2.p_flags, 0, 4);
2715 memset (tmp_phdr2.p_align, 0, 4);
2716
2717 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2718 == 0)
2719 continue;
2720 }
2721
2722 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2723 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2724 if (plt2_asect)
2725 {
2726 int content2;
2727 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2728 CORE_ADDR filesz;
2729
2730 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2731 & SEC_HAS_CONTENTS) != 0;
2732
2733 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2734 byte_order);
2735
2736 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2737 FILESZ is from the in-memory image. */
2738 if (content2)
2739 filesz += bfd_get_section_size (plt2_asect);
2740 else
2741 filesz -= bfd_get_section_size (plt2_asect);
2742
2743 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2744 filesz);
2745
2746 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2747 continue;
2748 }
2749
2750 ok = 0;
2751 break;
2752 }
2753 }
2754 else if (arch_size == 64
2755 && phdrs_size >= sizeof (Elf64_External_Phdr)
2756 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2757 {
2758 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2759 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2760 CORE_ADDR displacement = 0;
2761 int i;
2762
2763 /* DISPLACEMENT could be found more easily by the difference of
2764 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2765 already have enough information to compute that displacement
2766 with what we've read. */
2767
2768 for (i = 0; i < ehdr2->e_phnum; i++)
2769 if (phdr2[i].p_type == PT_LOAD)
2770 {
2771 Elf64_External_Phdr *phdrp;
2772 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2773 CORE_ADDR vaddr, paddr;
2774 CORE_ADDR displacement_vaddr = 0;
2775 CORE_ADDR displacement_paddr = 0;
2776
2777 phdrp = &((Elf64_External_Phdr *) buf)[i];
2778 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2779 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2780
2781 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2782 byte_order);
2783 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2784
2785 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2786 byte_order);
2787 displacement_paddr = paddr - phdr2[i].p_paddr;
2788
2789 if (displacement_vaddr == displacement_paddr)
2790 displacement = displacement_vaddr;
2791
2792 break;
2793 }
2794
2795 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2796
2797 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2798 {
2799 Elf64_External_Phdr *phdrp;
2800 Elf64_External_Phdr *phdr2p;
2801 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2802 CORE_ADDR vaddr, paddr;
2803 asection *plt2_asect;
2804
2805 phdrp = &((Elf64_External_Phdr *) buf)[i];
2806 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2807 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2808 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2809
2810 /* PT_GNU_STACK is an exception by being never relocated by
2811 prelink as its addresses are always zero. */
2812
2813 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2814 continue;
2815
2816 /* Check also other adjustment combinations - PR 11786. */
2817
2818 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2819 byte_order);
2820 vaddr -= displacement;
2821 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2822
2823 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2824 byte_order);
2825 paddr -= displacement;
2826 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2827
2828 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2829 continue;
2830
2831 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2832 CentOS-5 has problems with filesz, memsz as well.
2833 See PR 11786. */
2834 if (phdr2[i].p_type == PT_GNU_RELRO)
2835 {
2836 Elf64_External_Phdr tmp_phdr = *phdrp;
2837 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2838
2839 memset (tmp_phdr.p_filesz, 0, 8);
2840 memset (tmp_phdr.p_memsz, 0, 8);
2841 memset (tmp_phdr.p_flags, 0, 4);
2842 memset (tmp_phdr.p_align, 0, 8);
2843 memset (tmp_phdr2.p_filesz, 0, 8);
2844 memset (tmp_phdr2.p_memsz, 0, 8);
2845 memset (tmp_phdr2.p_flags, 0, 4);
2846 memset (tmp_phdr2.p_align, 0, 8);
2847
2848 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2849 == 0)
2850 continue;
2851 }
2852
2853 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2854 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2855 if (plt2_asect)
2856 {
2857 int content2;
2858 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2859 CORE_ADDR filesz;
2860
2861 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2862 & SEC_HAS_CONTENTS) != 0;
2863
2864 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2865 byte_order);
2866
2867 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2868 FILESZ is from the in-memory image. */
2869 if (content2)
2870 filesz += bfd_get_section_size (plt2_asect);
2871 else
2872 filesz -= bfd_get_section_size (plt2_asect);
2873
2874 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2875 filesz);
2876
2877 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2878 continue;
2879 }
2880
2881 ok = 0;
2882 break;
2883 }
2884 }
2885 else
2886 ok = 0;
2887 }
2888
2889 xfree (buf);
2890 xfree (buf2);
2891
2892 if (!ok)
2893 return 0;
2894 }
2895
2896 if (info_verbose)
2897 {
2898 /* It can be printed repeatedly as there is no easy way to check
2899 the executable symbols/file has already been relocated by the
2900 displacement. */
2901
2902 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2903 "displacement %s for \"%s\".\n"),
2904 paddress (target_gdbarch (), displacement),
2905 bfd_get_filename (exec_bfd));
2906 }
2907
2908 *displacementp = displacement;
2909 return 1;
2910 }
2911
2912 /* Relocate the main executable. This function should be called upon
2913 stopping the inferior process at the entry point to the program.
2914 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2915 different, the main executable is relocated by the proper amount. */
2916
2917 static void
2918 svr4_relocate_main_executable (void)
2919 {
2920 CORE_ADDR displacement;
2921
2922 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2923 probably contains the offsets computed using the PIE displacement
2924 from the previous run, which of course are irrelevant for this run.
2925 So we need to determine the new PIE displacement and recompute the
2926 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2927 already contains pre-computed offsets.
2928
2929 If we cannot compute the PIE displacement, either:
2930
2931 - The executable is not PIE.
2932
2933 - SYMFILE_OBJFILE does not match the executable started in the target.
2934 This can happen for main executable symbols loaded at the host while
2935 `ld.so --ld-args main-executable' is loaded in the target.
2936
2937 Then we leave the section offsets untouched and use them as is for
2938 this run. Either:
2939
2940 - These section offsets were properly reset earlier, and thus
2941 already contain the correct values. This can happen for instance
2942 when reconnecting via the remote protocol to a target that supports
2943 the `qOffsets' packet.
2944
2945 - The section offsets were not reset earlier, and the best we can
2946 hope is that the old offsets are still applicable to the new run. */
2947
2948 if (! svr4_exec_displacement (&displacement))
2949 return;
2950
2951 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2952 addresses. */
2953
2954 if (symfile_objfile)
2955 {
2956 struct section_offsets *new_offsets;
2957 int i;
2958
2959 new_offsets = alloca (symfile_objfile->num_sections
2960 * sizeof (*new_offsets));
2961
2962 for (i = 0; i < symfile_objfile->num_sections; i++)
2963 new_offsets->offsets[i] = displacement;
2964
2965 objfile_relocate (symfile_objfile, new_offsets);
2966 }
2967 else if (exec_bfd)
2968 {
2969 asection *asect;
2970
2971 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2972 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2973 (bfd_section_vma (exec_bfd, asect)
2974 + displacement));
2975 }
2976 }
2977
2978 /* Implement the "create_inferior_hook" target_so_ops method.
2979
2980 For SVR4 executables, the first instruction executed is either the first
2981 instruction in the dynamic linker (for dynamically linked
2982 executables) or the instruction at "start" for statically linked
2983 executables. For dynamically linked executables, the system
2984 first exec's /lib/libc.so.N, which contains the dynamic linker,
2985 and starts it running. The dynamic linker maps in any needed
2986 shared libraries, maps in the actual user executable, and then
2987 jumps to "start" in the user executable.
2988
2989 We can arrange to cooperate with the dynamic linker to discover the
2990 names of shared libraries that are dynamically linked, and the base
2991 addresses to which they are linked.
2992
2993 This function is responsible for discovering those names and
2994 addresses, and saving sufficient information about them to allow
2995 their symbols to be read at a later time. */
2996
2997 static void
2998 svr4_solib_create_inferior_hook (int from_tty)
2999 {
3000 struct svr4_info *info;
3001
3002 info = get_svr4_info ();
3003
3004 /* Clear the probes-based interface's state. */
3005 free_probes_table (info);
3006 free_solib_list (info);
3007
3008 /* Relocate the main executable if necessary. */
3009 svr4_relocate_main_executable ();
3010
3011 /* No point setting a breakpoint in the dynamic linker if we can't
3012 hit it (e.g., a core file, or a trace file). */
3013 if (!target_has_execution)
3014 return;
3015
3016 if (!svr4_have_link_map_offsets ())
3017 return;
3018
3019 if (!enable_break (info, from_tty))
3020 return;
3021 }
3022
3023 static void
3024 svr4_clear_solib (void)
3025 {
3026 struct svr4_info *info;
3027
3028 info = get_svr4_info ();
3029 info->debug_base = 0;
3030 info->debug_loader_offset_p = 0;
3031 info->debug_loader_offset = 0;
3032 xfree (info->debug_loader_name);
3033 info->debug_loader_name = NULL;
3034 }
3035
3036 /* Clear any bits of ADDR that wouldn't fit in a target-format
3037 data pointer. "Data pointer" here refers to whatever sort of
3038 address the dynamic linker uses to manage its sections. At the
3039 moment, we don't support shared libraries on any processors where
3040 code and data pointers are different sizes.
3041
3042 This isn't really the right solution. What we really need here is
3043 a way to do arithmetic on CORE_ADDR values that respects the
3044 natural pointer/address correspondence. (For example, on the MIPS,
3045 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3046 sign-extend the value. There, simply truncating the bits above
3047 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3048 be a new gdbarch method or something. */
3049 static CORE_ADDR
3050 svr4_truncate_ptr (CORE_ADDR addr)
3051 {
3052 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3053 /* We don't need to truncate anything, and the bit twiddling below
3054 will fail due to overflow problems. */
3055 return addr;
3056 else
3057 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3058 }
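
/* For example, on a target whose pointers are 32 bits wide while GDB's
   CORE_ADDR is 64 bits, gdbarch_ptr_bit is 32, so svr4_truncate_ptr
   masks with 0xffffffff and an (illustrative) value of 0x100401234
   becomes 0x00401234; when the two widths already match, the address
   is returned unchanged.  */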
3059
3060
3061 static void
3062 svr4_relocate_section_addresses (struct so_list *so,
3063 struct target_section *sec)
3064 {
3065 bfd *abfd = sec->the_bfd_section->owner;
3066
3067 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3068 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3069 }
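
/* For instance (addresses purely illustrative), a shared object whose
   link map yields a load address of 0x7ffff7dd7000 and whose .text
   section starts at unrelocated address 0x1040 ends up with sec->addr
   0x7ffff7dd8040 after the adjustment above.  */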
3070 \f
3071
3072 /* Architecture-specific operations. */
3073
3074 /* Per-architecture data key. */
3075 static struct gdbarch_data *solib_svr4_data;
3076
3077 struct solib_svr4_ops
3078 {
3079 /* Return a description of the layout of `struct link_map'. */
3080 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3081 };
3082
3083 /* Return a default for the architecture-specific operations. */
3084
3085 static void *
3086 solib_svr4_init (struct obstack *obstack)
3087 {
3088 struct solib_svr4_ops *ops;
3089
3090 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3091 ops->fetch_link_map_offsets = NULL;
3092 return ops;
3093 }
3094
3095 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3096 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3097
3098 void
3099 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3100 struct link_map_offsets *(*flmo) (void))
3101 {
3102 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
3103
3104 ops->fetch_link_map_offsets = flmo;
3105
3106 set_solib_ops (gdbarch, &svr4_so_ops);
3107 }
3108
3109 /* Fetch a link_map_offsets structure using the architecture-specific
3110 `struct link_map_offsets' fetcher. */
3111
3112 static struct link_map_offsets *
3113 svr4_fetch_link_map_offsets (void)
3114 {
3115 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3116
3117 gdb_assert (ops->fetch_link_map_offsets);
3118 return ops->fetch_link_map_offsets ();
3119 }
3120
3121 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3122
3123 static int
3124 svr4_have_link_map_offsets (void)
3125 {
3126 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3127
3128 return (ops->fetch_link_map_offsets != NULL);
3129 }
3130 \f
3131
3132 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3133 `struct r_debug' and a `struct link_map' that are binary compatible
3134 with the original SVR4 implementation. */
3135
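/* For illustration only (the authoritative declaration lives in the
   system's <link.h>, not in GDB), the leading members of a glibc-style
   `struct link_map' that the offsets below describe are roughly:

     struct link_map
     {
       ElfW(Addr) l_addr;          ILP32 offset 0,  LP64 offset 0
       char *l_name;               ILP32 offset 4,  LP64 offset 8
       ElfW(Dyn) *l_ld;            ILP32 offset 8,  LP64 offset 16
       struct link_map *l_next;    ILP32 offset 12, LP64 offset 24
       struct link_map *l_prev;    ILP32 offset 16, LP64 offset 32
     };  */
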
3136 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3137 for an ILP32 SVR4 system. */
3138
3139 struct link_map_offsets *
3140 svr4_ilp32_fetch_link_map_offsets (void)
3141 {
3142 static struct link_map_offsets lmo;
3143 static struct link_map_offsets *lmp = NULL;
3144
3145 if (lmp == NULL)
3146 {
3147 lmp = &lmo;
3148
3149 lmo.r_version_offset = 0;
3150 lmo.r_version_size = 4;
3151 lmo.r_map_offset = 4;
3152 lmo.r_brk_offset = 8;
3153 lmo.r_ldsomap_offset = 20;
3154
3155 /* Everything we need is in the first 20 bytes. */
3156 lmo.link_map_size = 20;
3157 lmo.l_addr_offset = 0;
3158 lmo.l_name_offset = 4;
3159 lmo.l_ld_offset = 8;
3160 lmo.l_next_offset = 12;
3161 lmo.l_prev_offset = 16;
3162 }
3163
3164 return lmp;
3165 }
3166
3167 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3168 for an LP64 SVR4 system. */
3169
3170 struct link_map_offsets *
3171 svr4_lp64_fetch_link_map_offsets (void)
3172 {
3173 static struct link_map_offsets lmo;
3174 static struct link_map_offsets *lmp = NULL;
3175
3176 if (lmp == NULL)
3177 {
3178 lmp = &lmo;
3179
3180 lmo.r_version_offset = 0;
3181 lmo.r_version_size = 4;
3182 lmo.r_map_offset = 8;
3183 lmo.r_brk_offset = 16;
3184 lmo.r_ldsomap_offset = 40;
3185
3186 /* Everything we need is in the first 40 bytes. */
3187 lmo.link_map_size = 40;
3188 lmo.l_addr_offset = 0;
3189 lmo.l_name_offset = 8;
3190 lmo.l_ld_offset = 16;
3191 lmo.l_next_offset = 24;
3192 lmo.l_prev_offset = 32;
3193 }
3194
3195 return lmp;
3196 }
3197 \f
3198
3199 struct target_so_ops svr4_so_ops;
3200
3201 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3202 different rule for symbol lookup. The lookup begins here in the DSO, not in
3203 the main executable. */
3204
3205 static struct symbol *
3206 elf_lookup_lib_symbol (struct objfile *objfile,
3207 const char *name,
3208 const domain_enum domain)
3209 {
3210 bfd *abfd;
3211
3212 if (objfile == symfile_objfile)
3213 abfd = exec_bfd;
3214 else
3215 {
3216 /* OBJFILE should have been passed as the non-debug one. */
3217 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3218
3219 abfd = objfile->obfd;
3220 }
3221
3222 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3223 return NULL;
3224
3225 return lookup_global_symbol_from_objfile (objfile, name, domain);
3226 }
3227
3228 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3229
3230 void
3231 _initialize_svr4_solib (void)
3232 {
3233 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3234 solib_svr4_pspace_data
3235 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3236
3237 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3238 svr4_so_ops.free_so = svr4_free_so;
3239 svr4_so_ops.clear_so = svr4_clear_so;
3240 svr4_so_ops.clear_solib = svr4_clear_solib;
3241 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3242 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3243 svr4_so_ops.current_sos = svr4_current_sos;
3244 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3245 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3246 svr4_so_ops.bfd_open = solib_bfd_open;
3247 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3248 svr4_so_ops.same = svr4_same;
3249 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3250 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3251 svr4_so_ops.handle_event = svr4_handle_solib_event;
3252 }